2024-11-21 04:22:07,999 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-11-21 04:22:08,016 main DEBUG Took 0.014225 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-21 04:22:08,016 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-21 04:22:08,017 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-21 04:22:08,018 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-21 04:22:08,020 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,032 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-21 04:22:08,067 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,070 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,071 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,072 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,073 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,073 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,075 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,075 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,076 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,077 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,078 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,079 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,080 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,080 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-21 04:22:08,081 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,081 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,082 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,082 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,083 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,083 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,085 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,085 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,086 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,086 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 04:22:08,087 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,088 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-21 04:22:08,095 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 04:22:08,097 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-21 04:22:08,100 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-21 04:22:08,100 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-21 04:22:08,102 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-21 04:22:08,103 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-21 04:22:08,123 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-21 04:22:08,127 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-21 04:22:08,134 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-21 04:22:08,135 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-21 04:22:08,135 main DEBUG createAppenders(={Console}) 2024-11-21 04:22:08,137 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized 2024-11-21 04:22:08,137 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-11-21 04:22:08,138 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK. 2024-11-21 04:22:08,140 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-21 04:22:08,141 main DEBUG OutputStream closed 2024-11-21 04:22:08,141 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-21 04:22:08,142 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-21 04:22:08,145 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK 2024-11-21 04:22:08,466 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-21 04:22:08,479 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-21 04:22:08,484 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-21 04:22:08,486 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-21 04:22:08,496 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-21 04:22:08,497 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-21 04:22:08,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-21 04:22:08,505 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-21 04:22:08,506 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-21 04:22:08,507 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-21 04:22:08,507 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-21 04:22:08,508 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-21 04:22:08,509 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-21 04:22:08,510 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-21 04:22:08,510 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-21 04:22:08,511 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-21 04:22:08,511 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-21 04:22:08,512 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-21 04:22:08,515 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-21 04:22:08,516 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null 2024-11-21 04:22:08,516 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-21 04:22:08,517 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK. 2024-11-21T04:22:08,535 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-11-21 04:22:08,538 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-21 04:22:08,539 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-21T04:22:09,047 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece 2024-11-21T04:22:09,048 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-11-21T04:22:09,125 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-11-21T04:22:09,416 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T04:22:09,474 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424, deleteOnExit=true 2024-11-21T04:22:09,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T04:22:09,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/test.cache.data in system properties and HBase conf 2024-11-21T04:22:09,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T04:22:09,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir in system properties and HBase conf 2024-11-21T04:22:09,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T04:22:09,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T04:22:09,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T04:22:09,687 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T04:22:09,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T04:22:09,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T04:22:09,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T04:22:09,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T04:22:09,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T04:22:09,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T04:22:09,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T04:22:09,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T04:22:09,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T04:22:09,727 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/nfs.dump.dir in system properties and HBase conf 2024-11-21T04:22:09,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/java.io.tmpdir in system properties and HBase conf 2024-11-21T04:22:09,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T04:22:09,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T04:22:09,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T04:22:11,280 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-21T04:22:11,386 INFO [Time-limited test {}] log.Log(170): Logging initialized @4531ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-21T04:22:11,486 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:11,577 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T04:22:11,621 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T04:22:11,621 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T04:22:11,624 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T04:22:11,645 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:11,652 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74e30e0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,AVAILABLE} 2024-11-21T04:22:11,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d167fe8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T04:22:11,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a55f3e1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/java.io.tmpdir/jetty-localhost-44301-hadoop-hdfs-3_4_1-tests_jar-_-any-7752004065028815264/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T04:22:11,934 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44301} 2024-11-21T04:22:11,934 INFO [Time-limited test {}] server.Server(415): Started @5080ms 2024-11-21T04:22:14,057 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:14,068 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T04:22:14,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T04:22:14,070 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T04:22:14,070 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T04:22:14,076 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11dede43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,AVAILABLE} 2024-11-21T04:22:14,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18d9bb98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T04:22:14,231 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@746b5398{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/java.io.tmpdir/jetty-localhost-45091-hadoop-hdfs-3_4_1-tests_jar-_-any-225856641866715375/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T04:22:14,232 INFO [Time-limited test 
{}] server.AbstractConnector(333): Started ServerConnector@26b3760d{HTTP/1.1, (http/1.1)}{localhost:45091} 2024-11-21T04:22:14,232 INFO [Time-limited test {}] server.Server(415): Started @7378ms 2024-11-21T04:22:14,307 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T04:22:14,506 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:14,521 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T04:22:14,535 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T04:22:14,535 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T04:22:14,536 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T04:22:14,543 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76c81bc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,AVAILABLE} 2024-11-21T04:22:14,544 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f60738e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T04:22:14,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1cca72f0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/java.io.tmpdir/jetty-localhost-39919-hadoop-hdfs-3_4_1-tests_jar-_-any-17266555886471941767/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T04:22:14,691 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2f8fc5e5{HTTP/1.1, (http/1.1)}{localhost:39919} 2024-11-21T04:22:14,691 INFO [Time-limited test {}] server.Server(415): Started @7837ms 2024-11-21T04:22:14,694 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T04:22:14,761 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:14,768 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T04:22:14,776 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T04:22:14,776 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T04:22:14,776 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T04:22:14,779 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@451d3a5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,AVAILABLE} 2024-11-21T04:22:14,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f667f57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T04:22:14,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@606c6d1d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/java.io.tmpdir/jetty-localhost-43805-hadoop-hdfs-3_4_1-tests_jar-_-any-3597053041343706398/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T04:22:14,909 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ec9a36b{HTTP/1.1, (http/1.1)}{localhost:43805} 2024-11-21T04:22:14,909 INFO [Time-limited test {}] server.Server(415): Started @8055ms 2024-11-21T04:22:14,912 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-21T04:22:16,048 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2/current/BP-829356940-172.17.0.2-1732162930732/current, will proceed with Du for space computation calculation, 2024-11-21T04:22:16,048 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4/current/BP-829356940-172.17.0.2-1732162930732/current, will proceed with Du for space computation calculation, 2024-11-21T04:22:16,049 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3/current/BP-829356940-172.17.0.2-1732162930732/current, will proceed with Du for space computation calculation, 2024-11-21T04:22:16,050 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1/current/BP-829356940-172.17.0.2-1732162930732/current, will proceed with Du for space computation calculation, 2024-11-21T04:22:16,135 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T04:22:16,138 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T04:22:16,202 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5/current/BP-829356940-172.17.0.2-1732162930732/current, will proceed with Du for space computation calculation, 2024-11-21T04:22:16,203 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2224360f363139ae with lease ID 0x4b56ee57d9cfe01b: Processing first storage report for DS-843822a9-dcff-4e93-b597-54fc35d2cf04 from datanode DatanodeRegistration(127.0.0.1:42055, datanodeUuid=a1cb1ef9-fba7-4559-8d3c-2e34a1f1e090, infoPort=44489, infoSecurePort=0, ipcPort=37319, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732) 2024-11-21T04:22:16,204 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6/current/BP-829356940-172.17.0.2-1732162930732/current, will proceed with Du for space computation calculation, 2024-11-21T04:22:16,205 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2224360f363139ae with lease ID 0x4b56ee57d9cfe01b: from storage DS-843822a9-dcff-4e93-b597-54fc35d2cf04 node DatanodeRegistration(127.0.0.1:42055, datanodeUuid=a1cb1ef9-fba7-4559-8d3c-2e34a1f1e090, infoPort=44489, infoSecurePort=0, ipcPort=37319, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-21T04:22:16,206 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2224360f363139ae with lease ID 0x4b56ee57d9cfe01b: Processing first storage report for DS-c018db8c-ecb7-4d1b-b417-5a22626effc5 from datanode DatanodeRegistration(127.0.0.1:42055, datanodeUuid=a1cb1ef9-fba7-4559-8d3c-2e34a1f1e090, infoPort=44489, infoSecurePort=0, ipcPort=37319, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732) 2024-11-21T04:22:16,206 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2224360f363139ae with lease ID 0x4b56ee57d9cfe01b: from storage DS-c018db8c-ecb7-4d1b-b417-5a22626effc5 node DatanodeRegistration(127.0.0.1:42055, datanodeUuid=a1cb1ef9-fba7-4559-8d3c-2e34a1f1e090, infoPort=44489, infoSecurePort=0, ipcPort=37319, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T04:22:16,215 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3a6429233aa2faa1 with lease ID 0x4b56ee57d9cfe01c: Processing first storage report for DS-e1da3505-cf9b-4bb4-8cd9-49cb6837bc8e from datanode DatanodeRegistration(127.0.0.1:33643, datanodeUuid=7aba5fb8-7d9b-4bdf-a725-ba58273c4411, infoPort=42959, infoSecurePort=0, ipcPort=36971, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732) 2024-11-21T04:22:16,216 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a6429233aa2faa1 with lease ID 0x4b56ee57d9cfe01c: from storage DS-e1da3505-cf9b-4bb4-8cd9-49cb6837bc8e node DatanodeRegistration(127.0.0.1:33643, 
datanodeUuid=7aba5fb8-7d9b-4bdf-a725-ba58273c4411, infoPort=42959, infoSecurePort=0, ipcPort=36971, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-21T04:22:16,216 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3a6429233aa2faa1 with lease ID 0x4b56ee57d9cfe01c: Processing first storage report for DS-eba473b7-a0cc-47b1-8327-f319eedb7957 from datanode DatanodeRegistration(127.0.0.1:33643, datanodeUuid=7aba5fb8-7d9b-4bdf-a725-ba58273c4411, infoPort=42959, infoSecurePort=0, ipcPort=36971, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732) 2024-11-21T04:22:16,216 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a6429233aa2faa1 with lease ID 0x4b56ee57d9cfe01c: from storage DS-eba473b7-a0cc-47b1-8327-f319eedb7957 node DatanodeRegistration(127.0.0.1:33643, datanodeUuid=7aba5fb8-7d9b-4bdf-a725-ba58273c4411, infoPort=42959, infoSecurePort=0, ipcPort=36971, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-21T04:22:16,235 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T04:22:16,244 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x17c651605618c369 with lease ID 0x4b56ee57d9cfe01d: Processing first storage report for DS-592a7e82-9870-4bba-b814-b684c730bac4 from datanode DatanodeRegistration(127.0.0.1:38043, datanodeUuid=18c6b082-ea3f-4d32-a3a1-24c94ee7a26d, infoPort=34453, infoSecurePort=0, ipcPort=46183, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732) 2024-11-21T04:22:16,244 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x17c651605618c369 with lease ID 0x4b56ee57d9cfe01d: from storage DS-592a7e82-9870-4bba-b814-b684c730bac4 node DatanodeRegistration(127.0.0.1:38043, datanodeUuid=18c6b082-ea3f-4d32-a3a1-24c94ee7a26d, infoPort=34453, infoSecurePort=0, ipcPort=46183, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T04:22:16,245 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x17c651605618c369 with lease ID 0x4b56ee57d9cfe01d: Processing first storage report for DS-5fed1305-d086-4154-a44e-f8c7978566ea from datanode DatanodeRegistration(127.0.0.1:38043, datanodeUuid=18c6b082-ea3f-4d32-a3a1-24c94ee7a26d, infoPort=34453, infoSecurePort=0, ipcPort=46183, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732) 2024-11-21T04:22:16,245 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x17c651605618c369 with lease ID 0x4b56ee57d9cfe01d: from storage DS-5fed1305-d086-4154-a44e-f8c7978566ea node DatanodeRegistration(127.0.0.1:38043, datanodeUuid=18c6b082-ea3f-4d32-a3a1-24c94ee7a26d, infoPort=34453, infoSecurePort=0, ipcPort=46183, storageInfo=lv=-57;cid=testClusterID;nsid=2002944641;c=1732162930732), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T04:22:16,351 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece 2024-11-21T04:22:16,441 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/zookeeper_0, clientPort=60527, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T04:22:16,457 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60527 2024-11-21T04:22:16,470 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T04:22:16,474 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T04:22:16,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741825_1001 (size=7) 2024-11-21T04:22:16,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741825_1001 (size=7) 2024-11-21T04:22:16,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741825_1001 (size=7) 2024-11-21T04:22:16,859 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a with version=8 2024-11-21T04:22:16,860 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/hbase-staging 2024-11-21T04:22:17,016 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-21T04:22:17,299 INFO [Time-limited test {}] client.ConnectionUtils(128): master/fdc0a393186c:0 server-side Connection retries=45 2024-11-21T04:22:17,309 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:17,310 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:17,315 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T04:22:17,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:17,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T04:22:17,485 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T04:22:17,565 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-21T04:22:17,577 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-21T04:22:17,580 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T04:22:17,605 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 110042 (auto-detected) 2024-11-21T04:22:17,606 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-21T04:22:17,625 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39577 2024-11-21T04:22:17,649 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39577 connecting to ZooKeeper ensemble=127.0.0.1:60527 2024-11-21T04:22:17,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:395770x0, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T04:22:17,753 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39577-0x1015cea71210000 connected 2024-11-21T04:22:17,843 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T04:22:17,847 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T04:22:17,859 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T04:22:17,864 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a, hbase.cluster.distributed=false 2024-11-21T04:22:17,922 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T04:22:17,940 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39577 2024-11-21T04:22:17,943 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39577 2024-11-21T04:22:17,947 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=39577 2024-11-21T04:22:17,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39577 2024-11-21T04:22:17,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39577 2024-11-21T04:22:18,071 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fdc0a393186c:0 server-side Connection retries=45 2024-11-21T04:22:18,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:18,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:18,073 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T04:22:18,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:18,074 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T04:22:18,076 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T04:22:18,080 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T04:22:18,087 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39347 2024-11-21T04:22:18,089 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39347 connecting to ZooKeeper ensemble=127.0.0.1:60527 2024-11-21T04:22:18,091 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T04:22:18,095 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T04:22:18,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:393470x0, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T04:22:18,125 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39347-0x1015cea71210001 connected 2024-11-21T04:22:18,125 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T04:22:18,131 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T04:22:18,142 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, 
evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T04:22:18,145 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T04:22:18,151 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T04:22:18,154 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39347 2024-11-21T04:22:18,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39347 2024-11-21T04:22:18,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39347 2024-11-21T04:22:18,159 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39347 2024-11-21T04:22:18,161 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39347 2024-11-21T04:22:18,186 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fdc0a393186c:0 server-side Connection retries=45 2024-11-21T04:22:18,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:18,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:18,187 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T04:22:18,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:18,187 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T04:22:18,187 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T04:22:18,188 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T04:22:18,189 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34363 2024-11-21T04:22:18,191 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34363 connecting to ZooKeeper ensemble=127.0.0.1:60527 2024-11-21T04:22:18,192 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T04:22:18,195 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T04:22:18,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:343630x0, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T04:22:18,235 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:343630x0, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T04:22:18,235 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34363-0x1015cea71210002 connected 2024-11-21T04:22:18,235 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T04:22:18,236 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T04:22:18,237 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T04:22:18,239 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T04:22:18,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34363 2024-11-21T04:22:18,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34363 2024-11-21T04:22:18,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34363 2024-11-21T04:22:18,244 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34363 2024-11-21T04:22:18,246 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34363 2024-11-21T04:22:18,269 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fdc0a393186c:0 server-side Connection retries=45 2024-11-21T04:22:18,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:18,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:18,270 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T04:22:18,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T04:22:18,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T04:22:18,271 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T04:22:18,271 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T04:22:18,275 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42393 2024-11-21T04:22:18,278 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42393 connecting to ZooKeeper ensemble=127.0.0.1:60527 2024-11-21T04:22:18,279 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T04:22:18,282 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T04:22:18,301 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:423930x0, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T04:22:18,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423930x0, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T04:22:18,302 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T04:22:18,307 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42393-0x1015cea71210003 connected 2024-11-21T04:22:18,308 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T04:22:18,309 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-21T04:22:18,315 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-21T04:22:18,323 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42393 2024-11-21T04:22:18,323 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42393 2024-11-21T04:22:18,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42393 2024-11-21T04:22:18,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42393 2024-11-21T04:22:18,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42393 2024-11-21T04:22:18,348 DEBUG [M:0;fdc0a393186c:39577 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;fdc0a393186c:39577 2024-11-21T04:22:18,350 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/fdc0a393186c,39577,1732162937127 2024-11-21T04:22:18,367 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T04:22:18,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T04:22:18,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T04:22:18,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T04:22:18,370 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/fdc0a393186c,39577,1732162937127 2024-11-21T04:22:18,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T04:22:18,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:18,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T04:22:18,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:18,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:18,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-21T04:22:18,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:18,405 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-21T04:22:18,406 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/fdc0a393186c,39577,1732162937127 from backup master directory 2024-11-21T04:22:18,416 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T04:22:18,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T04:22:18,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/fdc0a393186c,39577,1732162937127 2024-11-21T04:22:18,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T04:22:18,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-21T04:22:18,421 WARN [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T04:22:18,421 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=fdc0a393186c,39577,1732162937127 2024-11-21T04:22:18,423 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-21T04:22:18,425 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-21T04:22:18,506 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/hbase.id] with ID: 572b726e-d928-426e-9114-1cd37decf96c 2024-11-21T04:22:18,506 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.tmp/hbase.id 2024-11-21T04:22:18,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741826_1002 (size=42) 2024-11-21T04:22:18,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741826_1002 (size=42) 2024-11-21T04:22:18,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741826_1002 (size=42) 2024-11-21T04:22:18,562 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.tmp/hbase.id]:[hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/hbase.id] 2024-11-21T04:22:18,655 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added 
intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T04:22:18,663 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T04:22:18,691 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 25ms. 2024-11-21T04:22:18,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:18,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:18,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:18,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:18,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741827_1003 (size=196) 2024-11-21T04:22:18,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741827_1003 (size=196) 2024-11-21T04:22:18,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741827_1003 (size=196) 2024-11-21T04:22:18,763 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:22:18,766 INFO 
[master/fdc0a393186c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T04:22:18,794 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T04:22:18,799 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T04:22:18,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741828_1004 (size=1189) 2024-11-21T04:22:18,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741828_1004 (size=1189) 2024-11-21T04:22:18,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741828_1004 (size=1189) 2024-11-21T04:22:18,868 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/data/master/store 2024-11-21T04:22:18,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741829_1005 (size=34) 2024-11-21T04:22:18,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741829_1005 (size=34) 2024-11-21T04:22:18,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741829_1005 (size=34) 2024-11-21T04:22:18,908 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-21T04:22:18,911 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:18,912 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T04:22:18,913 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T04:22:18,913 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T04:22:18,914 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T04:22:18,914 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T04:22:18,914 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T04:22:18,915 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732162938912Disabling compacts and flushes for region at 1732162938912Disabling writes for close at 1732162938914 (+2 ms)Writing region close event to WAL at 1732162938914Closed at 1732162938914 2024-11-21T04:22:18,917 WARN [master/fdc0a393186c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/data/master/store/.initializing 2024-11-21T04:22:18,918 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/WALs/fdc0a393186c,39577,1732162937127 2024-11-21T04:22:18,928 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T04:22:18,953 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fdc0a393186c%2C39577%2C1732162937127, suffix=, logDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/WALs/fdc0a393186c,39577,1732162937127, archiveDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/oldWALs, maxLogs=10 2024-11-21T04:22:18,992 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/WALs/fdc0a393186c,39577,1732162937127/fdc0a393186c%2C39577%2C1732162937127.1732162938960, exclude list is [], retry=0 2024-11-21T04:22:19,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33643,DS-e1da3505-cf9b-4bb4-8cd9-49cb6837bc8e,DISK] 
2024-11-21T04:22:19,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38043,DS-592a7e82-9870-4bba-b814-b684c730bac4,DISK] 2024-11-21T04:22:19,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42055,DS-843822a9-dcff-4e93-b597-54fc35d2cf04,DISK] 2024-11-21T04:22:19,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-21T04:22:19,070 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/WALs/fdc0a393186c,39577,1732162937127/fdc0a393186c%2C39577%2C1732162937127.1732162938960 2024-11-21T04:22:19,071 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42959:42959),(127.0.0.1/127.0.0.1:34453:34453),(127.0.0.1/127.0.0.1:44489:44489)] 2024-11-21T04:22:19,071 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T04:22:19,072 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:19,076 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,078 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,132 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,161 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T04:22:19,166 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:19,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T04:22:19,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T04:22:19,174 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:19,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:22:19,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T04:22:19,180 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:19,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:22:19,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,186 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T04:22:19,186 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:19,188 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:22:19,188 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,194 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,196 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,203 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,204 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,209 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T04:22:19,214 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T04:22:19,220 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:22:19,222 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74825282, jitterRate=0.11498358845710754}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T04:22:19,231 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732162939098Initializing all the Stores at 1732162939101 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732162939102 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162939102Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162939103 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162939103Cleaning up temporary data from old regions at 1732162939204 (+101 ms)Region opened successfully at 1732162939231 (+27 ms) 2024-11-21T04:22:19,233 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T04:22:19,279 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6da5fabb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fdc0a393186c/172.17.0.2:0 2024-11-21T04:22:19,322 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-21T04:22:19,336 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T04:22:19,336 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T04:22:19,340 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T04:22:19,342 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 2 msec 2024-11-21T04:22:19,349 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-11-21T04:22:19,349 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T04:22:19,384 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T04:22:19,396 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-21T04:22:19,432 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-21T04:22:19,435 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T04:22:19,437 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-21T04:22:19,448 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-21T04:22:19,452 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T04:22:19,455 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-21T04:22:19,466 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-21T04:22:19,468 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-21T04:22:19,474 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-21T04:22:19,491 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T04:22:19,499 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-21T04:22:19,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T04:22:19,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T04:22:19,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T04:22:19,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-21T04:22:19,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:19,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:19,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:19,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:19,513 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=fdc0a393186c,39577,1732162937127, sessionid=0x1015cea71210000, setting cluster-up flag (Was=false) 2024-11-21T04:22:19,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:19,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:19,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:19,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-21T04:22:19,566 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-21T04:22:19,569 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fdc0a393186c,39577,1732162937127 2024-11-21T04:22:19,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:19,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:19,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:19,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:19,616 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-21T04:22:19,617 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fdc0a393186c,39577,1732162937127 2024-11-21T04:22:19,625 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T04:22:19,637 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(746): ClusterId : 572b726e-d928-426e-9114-1cd37decf96c 2024-11-21T04:22:19,637 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(746): ClusterId : 572b726e-d928-426e-9114-1cd37decf96c 2024-11-21T04:22:19,638 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(746): ClusterId : 572b726e-d928-426e-9114-1cd37decf96c 2024-11-21T04:22:19,640 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T04:22:19,640 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T04:22:19,640 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T04:22:19,654 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T04:22:19,654 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T04:22:19,654 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T04:22:19,654 DEBUG [RS:0;fdc0a393186c:39347 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T04:22:19,654 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T04:22:19,655 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T04:22:19,667 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T04:22:19,667 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T04:22:19,668 DEBUG [RS:1;fdc0a393186c:34363 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c942638, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fdc0a393186c/172.17.0.2:0 2024-11-21T04:22:19,668 DEBUG [RS:0;fdc0a393186c:39347 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e406c4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fdc0a393186c/172.17.0.2:0 2024-11-21T04:22:19,668 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T04:22:19,669 DEBUG [RS:2;fdc0a393186c:42393 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b4ff399, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fdc0a393186c/172.17.0.2:0 2024-11-21T04:22:19,671 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-21T04:22:19,677 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:22:19,678 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-11-21T04:22:19,692 DEBUG [RS:1;fdc0a393186c:34363 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;fdc0a393186c:34363 2024-11-21T04:22:19,693 DEBUG [RS:2;fdc0a393186c:42393 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;fdc0a393186c:42393 2024-11-21T04:22:19,694 DEBUG [RS:0;fdc0a393186c:39347 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;fdc0a393186c:39347 2024-11-21T04:22:19,697 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T04:22:19,697 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T04:22:19,697 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T04:22:19,697 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T04:22:19,697 DEBUG [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-21T04:22:19,697 DEBUG [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-21T04:22:19,698 INFO [RS:2;fdc0a393186c:42393 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:22:19,698 INFO [RS:1;fdc0a393186c:34363 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:22:19,698 DEBUG [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T04:22:19,698 DEBUG [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T04:22:19,701 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(2659): reportForDuty to master=fdc0a393186c,39577,1732162937127 with port=42393, startcode=1732162938268 2024-11-21T04:22:19,701 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(2659): reportForDuty to master=fdc0a393186c,39577,1732162937127 with port=34363, startcode=1732162938185 2024-11-21T04:22:19,703 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T04:22:19,703 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T04:22:19,703 DEBUG [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-21T04:22:19,704 INFO [RS:0;fdc0a393186c:39347 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:22:19,704 DEBUG [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T04:22:19,709 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(2659): reportForDuty to master=fdc0a393186c,39577,1732162937127 with port=39347, startcode=1732162938035 2024-11-21T04:22:19,718 DEBUG [RS:0;fdc0a393186c:39347 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T04:22:19,718 DEBUG [RS:1;fdc0a393186c:34363 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T04:22:19,719 DEBUG [RS:2;fdc0a393186c:42393 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T04:22:19,761 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T04:22:19,768 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53399, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T04:22:19,771 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46869, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T04:22:19,771 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50143, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T04:22:19,776 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T04:22:19,776 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-21T04:22:19,783 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-21T04:22:19,784 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T04:22:19,784 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-21T04:22:19,791 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: fdc0a393186c,39577,1732162937127 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T04:22:19,799 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/fdc0a393186c:0, corePoolSize=5, maxPoolSize=5 2024-11-21T04:22:19,799 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/fdc0a393186c:0, corePoolSize=5, maxPoolSize=5 2024-11-21T04:22:19,799 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/fdc0a393186c:0, corePoolSize=5, maxPoolSize=5 2024-11-21T04:22:19,799 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] executor.ExecutorService(95): 
Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/fdc0a393186c:0, corePoolSize=5, maxPoolSize=5 2024-11-21T04:22:19,800 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/fdc0a393186c:0, corePoolSize=10, maxPoolSize=10 2024-11-21T04:22:19,800 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:19,800 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/fdc0a393186c:0, corePoolSize=2, maxPoolSize=2 2024-11-21T04:22:19,800 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:19,810 DEBUG [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-21T04:22:19,810 DEBUG [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-21T04:22:19,810 DEBUG [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-21T04:22:19,810 WARN [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-21T04:22:19,810 WARN [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-21T04:22:19,810 WARN [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-21T04:22:19,811 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T04:22:19,811 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732162969811 2024-11-21T04:22:19,811 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T04:22:19,812 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T04:22:19,814 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T04:22:19,817 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T04:22:19,817 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T04:22:19,818 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T04:22:19,818 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T04:22:19,818 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:19,819 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T04:22:19,821 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:19,826 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T04:22:19,827 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T04:22:19,828 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T04:22:19,831 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T04:22:19,832 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T04:22:19,834 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/fdc0a393186c:0:becomeActiveMaster-HFileCleaner.large.0-1732162939833,5,FailOnTimeoutGroup] 2024-11-21T04:22:19,836 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/fdc0a393186c:0:becomeActiveMaster-HFileCleaner.small.0-1732162939834,5,FailOnTimeoutGroup] 2024-11-21T04:22:19,836 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
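The FSTableDescriptors(156) entry above records the per-family attributes the master uses when it writes the hbase:meta descriptor (VERSIONS => '3', IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => '8192 B (8KB)'). As a rough illustration only, the same attributes map onto the public HBase client API as sketched below; the table name "example" and the single 'info' family are placeholders for this sketch, since hbase:meta itself is created by the master, never by client code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  // Builds a descriptor with the same per-family settings logged for hbase:meta's 'info' family.
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example")) // placeholder name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .build())
        .build();
  }
}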
2024-11-21T04:22:19,836 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T04:22:19,838 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:19,838 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:19,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741831_1007 (size=1321) 2024-11-21T04:22:19,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741831_1007 (size=1321) 2024-11-21T04:22:19,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741831_1007 (size=1321) 2024-11-21T04:22:19,886 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T04:22:19,887 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:22:19,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741832_1008 (size=32) 2024-11-21T04:22:19,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741832_1008 (size=32) 
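The HMaster(1741) message at the start of this block notes that reopening regions with a very high storeFileRefCount is disabled because hbase.regions.recovery.store.file.ref.count is not set above 0. A minimal sketch of turning that chore on is below, assuming a deliberately chosen threshold; the value 3 is an arbitrary example, not taken from this run, and in practice the key would normally live in hbase-site.xml rather than in code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountRecoverySketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Any value > 0 enables the master chore that reopens regions whose store file
    // reference count exceeds the threshold; 3 is an illustrative choice only.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    return conf;
  }
}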
2024-11-21T04:22:19,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741832_1008 (size=32) 2024-11-21T04:22:19,912 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(2659): reportForDuty to master=fdc0a393186c,39577,1732162937127 with port=34363, startcode=1732162938185 2024-11-21T04:22:19,912 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(2659): reportForDuty to master=fdc0a393186c,39577,1732162937127 with port=39347, startcode=1732162938035 2024-11-21T04:22:19,913 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(2659): reportForDuty to master=fdc0a393186c,39577,1732162937127 with port=42393, startcode=1732162938268 2024-11-21T04:22:19,913 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:19,914 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fdc0a393186c,39347,1732162938035 2024-11-21T04:22:19,917 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577 {}] master.ServerManager(517): Registering regionserver=fdc0a393186c,39347,1732162938035 2024-11-21T04:22:19,931 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T04:22:19,931 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fdc0a393186c,42393,1732162938268 2024-11-21T04:22:19,931 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577 {}] master.ServerManager(517): Registering regionserver=fdc0a393186c,42393,1732162938268 2024-11-21T04:22:19,931 DEBUG [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:22:19,931 DEBUG [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44067 2024-11-21T04:22:19,931 DEBUG [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T04:22:19,935 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T04:22:19,935 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
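The CompactionConfiguration(183) entry above records the effective compaction tuning for the 'info' family of region 1588230740: minCompactSize 128 MB, 3 to 10 files per minor compaction, ratio 1.2 (5.0 off-peak), and a 7-day major-compaction period with 0.5 jitter. The sketch below maps those logged values back onto the standard HBase compaction settings; the key-to-value mapping is my reading of the usual option names, not something this log prints, so treat it as an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period (7 days, ms)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
    return conf;
  }
}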
2024-11-21T04:22:19,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T04:22:19,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T04:22:19,941 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T04:22:19,941 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:19,942 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fdc0a393186c,34363,1732162938185 2024-11-21T04:22:19,942 DEBUG [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:22:19,942 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577 {}] master.ServerManager(517): Registering regionserver=fdc0a393186c,34363,1732162938185 2024-11-21T04:22:19,942 DEBUG [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44067 2024-11-21T04:22:19,942 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T04:22:19,942 DEBUG [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T04:22:19,942 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T04:22:19,945 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T04:22:19,946 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:19,946 DEBUG [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:22:19,946 DEBUG [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44067 2024-11-21T04:22:19,946 DEBUG [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T04:22:19,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T04:22:19,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T04:22:19,952 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T04:22:19,952 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:19,953 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T04:22:19,954 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T04:22:19,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740 2024-11-21T04:22:19,956 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740 2024-11-21T04:22:19,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T04:22:19,959 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T04:22:19,959 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T04:22:19,961 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T04:22:19,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T04:22:19,970 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:22:19,971 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65404555, jitterRate=-0.025396183133125305}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T04:22:19,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732162939913Initializing all the Stores at 1732162939921 (+8 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732162939921Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732162939930 (+9 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162939931 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732162939931Cleaning up temporary data from old regions at 1732162939959 (+28 ms)Region opened successfully at 1732162939975 (+16 ms) 2024-11-21T04:22:19,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T04:22:19,975 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T04:22:19,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T04:22:19,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired 
close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T04:22:19,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T04:22:19,977 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T04:22:19,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732162939975Disabling compacts and flushes for region at 1732162939975Disabling writes for close at 1732162939975Writing region close event to WAL at 1732162939976 (+1 ms)Closed at 1732162939977 (+1 ms) 2024-11-21T04:22:19,980 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T04:22:19,980 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T04:22:19,986 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T04:22:19,995 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T04:22:19,998 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T04:22:20,001 DEBUG [RS:0;fdc0a393186c:39347 {}] zookeeper.ZKUtil(111): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fdc0a393186c,39347,1732162938035 2024-11-21T04:22:20,002 WARN [RS:0;fdc0a393186c:39347 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T04:22:20,002 INFO [RS:0;fdc0a393186c:39347 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T04:22:20,002 DEBUG [RS:1;fdc0a393186c:34363 {}] zookeeper.ZKUtil(111): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fdc0a393186c,34363,1732162938185 2024-11-21T04:22:20,002 DEBUG [RS:2;fdc0a393186c:42393 {}] zookeeper.ZKUtil(111): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fdc0a393186c,42393,1732162938268 2024-11-21T04:22:20,002 WARN [RS:1;fdc0a393186c:34363 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T04:22:20,002 WARN [RS:2;fdc0a393186c:42393 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
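The RS:0 region server above reports wal.WALFactory(196) instantiating AsyncFSWALProvider, the asynchronous fan-out WAL writer (its peers log the same just below); the provider is chosen by the hbase.wal.provider setting, and the preceding HBASE_ZNODE_FILE warnings simply mean that environment variable is unset in this mini-cluster. A hedged sketch of pinning the provider explicitly follows; whether asyncfs is already the default in this snapshot build is not stated in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // "asyncfs" selects AsyncFSWALProvider as seen in the log above;
    // "filesystem" would select the older FSHLog-based provider instead.
    conf.set("hbase.wal.provider", "asyncfs");
    return conf;
  }
}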
2024-11-21T04:22:20,002 INFO [RS:2;fdc0a393186c:42393 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T04:22:20,003 DEBUG [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,42393,1732162938268 2024-11-21T04:22:20,002 INFO [RS:1;fdc0a393186c:34363 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T04:22:20,004 DEBUG [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,34363,1732162938185 2024-11-21T04:22:20,004 DEBUG [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,39347,1732162938035 2024-11-21T04:22:20,005 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fdc0a393186c,39347,1732162938035] 2024-11-21T04:22:20,005 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fdc0a393186c,34363,1732162938185] 2024-11-21T04:22:20,005 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fdc0a393186c,42393,1732162938268] 2024-11-21T04:22:20,036 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T04:22:20,036 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T04:22:20,036 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T04:22:20,065 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T04:22:20,065 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T04:22:20,065 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T04:22:20,073 INFO [RS:0;fdc0a393186c:39347 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T04:22:20,073 INFO [RS:2;fdc0a393186c:42393 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T04:22:20,074 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,074 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
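The MemStoreFlusher(131) and PressureAwareCompactionThroughputController(131) entries show the region servers starting with a global on-heap memstore limit of 880 MB (low-water mark 836 MB) and compaction throughput bounded between 50 and 100 MB/s with a 60 s tuning period. Those figures are derived from the heap size and the standard settings sketched below; the key names are my mapping onto the usual HBase options, not values printed by this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndThroughputSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap reserved for all memstores (880 MB in this run),
    // and the fraction of that limit used as the flush low-water mark (836 MB here).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Bounds used by PressureAwareCompactionThroughputController, in bytes per second.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}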
2024-11-21T04:22:20,084 INFO [RS:1;fdc0a393186c:34363 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T04:22:20,084 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,086 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T04:22:20,086 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T04:22:20,087 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T04:22:20,093 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T04:22:20,095 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,095 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,095 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,095 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,095 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,095 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,096 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fdc0a393186c:0, corePoolSize=2, maxPoolSize=2 2024-11-21T04:22:20,096 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,096 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,096 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,096 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,097 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T04:22:20,097 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,097 DEBUG 
[RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,097 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0, corePoolSize=3, maxPoolSize=3 2024-11-21T04:22:20,097 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,097 DEBUG [RS:1;fdc0a393186c:34363 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fdc0a393186c:0, corePoolSize=3, maxPoolSize=3 2024-11-21T04:22:20,097 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,097 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,097 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,097 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,098 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,098 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fdc0a393186c:0, corePoolSize=2, maxPoolSize=2 2024-11-21T04:22:20,098 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,098 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T04:22:20,098 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-21T04:22:20,099 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,099 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,099 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,099 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,099 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,099 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fdc0a393186c:0, corePoolSize=2, maxPoolSize=2 2024-11-21T04:22:20,099 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,099 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,099 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,100 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,100 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,100 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,100 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0, corePoolSize=3, maxPoolSize=3 2024-11-21T04:22:20,100 DEBUG [RS:2;fdc0a393186c:42393 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fdc0a393186c:0, corePoolSize=3, maxPoolSize=3 2024-11-21T04:22:20,100 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,101 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,101 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,102 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,102 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fdc0a393186c:0, corePoolSize=1, maxPoolSize=1 2024-11-21T04:22:20,102 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0, corePoolSize=3, maxPoolSize=3 2024-11-21T04:22:20,102 DEBUG [RS:0;fdc0a393186c:39347 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fdc0a393186c:0, corePoolSize=3, maxPoolSize=3 2024-11-21T04:22:20,114 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,114 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,114 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,114 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,114 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,114 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,34363,1732162938185-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T04:22:20,115 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,115 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,115 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,115 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,115 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,115 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,42393,1732162938268-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T04:22:20,116 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,116 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,116 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,116 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
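Each region server above enables its CompactionChecker and MemstoreFlusherChore with a 1000 ms period. In a mini-cluster test a period this short is normally a consequence of the harness lowering hbase.server.thread.wakefrequency; that is an inference on my part, since the log does not name the key, so the sketch below is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ChoreWakeFrequencySketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Base wake frequency (ms) that drives periodic region server chores such as the
    // compaction checker and memstore flusher; 1000 ms matches the periods logged above.
    conf.setInt("hbase.server.thread.wakefrequency", 1000);
    return conf;
  }
}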
2024-11-21T04:22:20,116 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,117 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,39347,1732162938035-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T04:22:20,149 WARN [fdc0a393186c:39577 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-21T04:22:20,150 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T04:22:20,150 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T04:22:20,153 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,42393,1732162938268-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,154 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,34363,1732162938185-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,154 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,154 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.Replication(171): fdc0a393186c,42393,1732162938268 started 2024-11-21T04:22:20,154 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,154 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.Replication(171): fdc0a393186c,34363,1732162938185 started 2024-11-21T04:22:20,163 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T04:22:20,163 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,39347,1732162938035-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,163 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,164 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.Replication(171): fdc0a393186c,39347,1732162938035 started 2024-11-21T04:22:20,180 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T04:22:20,181 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(1482): Serving as fdc0a393186c,42393,1732162938268, RpcServer on fdc0a393186c/172.17.0.2:42393, sessionid=0x1015cea71210003 2024-11-21T04:22:20,182 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T04:22:20,182 DEBUG [RS:2;fdc0a393186c:42393 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fdc0a393186c,42393,1732162938268 2024-11-21T04:22:20,182 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fdc0a393186c,42393,1732162938268' 2024-11-21T04:22:20,183 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T04:22:20,183 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,184 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(1482): Serving as fdc0a393186c,34363,1732162938185, RpcServer on fdc0a393186c/172.17.0.2:34363, sessionid=0x1015cea71210002 2024-11-21T04:22:20,184 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T04:22:20,184 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T04:22:20,184 DEBUG [RS:1;fdc0a393186c:34363 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fdc0a393186c,34363,1732162938185 2024-11-21T04:22:20,184 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fdc0a393186c,34363,1732162938185' 2024-11-21T04:22:20,184 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T04:22:20,185 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T04:22:20,185 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T04:22:20,186 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T04:22:20,186 DEBUG [RS:2;fdc0a393186c:42393 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fdc0a393186c,42393,1732162938268 2024-11-21T04:22:20,186 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fdc0a393186c,42393,1732162938268' 2024-11-21T04:22:20,186 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T04:22:20,187 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T04:22:20,187 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T04:22:20,187 DEBUG [RS:1;fdc0a393186c:34363 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fdc0a393186c,34363,1732162938185 2024-11-21T04:22:20,187 DEBUG [RS:2;fdc0a393186c:42393 {}] 
procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T04:22:20,187 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fdc0a393186c,34363,1732162938185' 2024-11-21T04:22:20,187 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T04:22:20,188 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T04:22:20,188 DEBUG [RS:2;fdc0a393186c:42393 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T04:22:20,189 INFO [RS:2;fdc0a393186c:42393 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T04:22:20,189 INFO [RS:2;fdc0a393186c:42393 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T04:22:20,190 DEBUG [RS:1;fdc0a393186c:34363 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T04:22:20,190 INFO [RS:1;fdc0a393186c:34363 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T04:22:20,190 INFO [RS:1;fdc0a393186c:34363 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T04:22:20,194 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:20,194 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(1482): Serving as fdc0a393186c,39347,1732162938035, RpcServer on fdc0a393186c/172.17.0.2:39347, sessionid=0x1015cea71210001 2024-11-21T04:22:20,194 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T04:22:20,194 DEBUG [RS:0;fdc0a393186c:39347 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fdc0a393186c,39347,1732162938035 2024-11-21T04:22:20,195 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fdc0a393186c,39347,1732162938035' 2024-11-21T04:22:20,195 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-21T04:22:20,196 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-21T04:22:20,196 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T04:22:20,196 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T04:22:20,197 DEBUG [RS:0;fdc0a393186c:39347 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fdc0a393186c,39347,1732162938035 2024-11-21T04:22:20,197 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fdc0a393186c,39347,1732162938035' 2024-11-21T04:22:20,197 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-21T04:22:20,197 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new 
procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-21T04:22:20,198 DEBUG [RS:0;fdc0a393186c:39347 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T04:22:20,198 INFO [RS:0;fdc0a393186c:39347 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T04:22:20,198 INFO [RS:0;fdc0a393186c:39347 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T04:22:20,296 INFO [RS:1;fdc0a393186c:34363 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T04:22:20,296 INFO [RS:2;fdc0a393186c:42393 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T04:22:20,299 INFO [RS:0;fdc0a393186c:39347 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T04:22:20,300 INFO [RS:1;fdc0a393186c:34363 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fdc0a393186c%2C34363%2C1732162938185, suffix=, logDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,34363,1732162938185, archiveDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/oldWALs, maxLogs=32 2024-11-21T04:22:20,306 INFO [RS:0;fdc0a393186c:39347 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fdc0a393186c%2C39347%2C1732162938035, suffix=, logDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,39347,1732162938035, archiveDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/oldWALs, maxLogs=32 2024-11-21T04:22:20,311 INFO [RS:2;fdc0a393186c:42393 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fdc0a393186c%2C42393%2C1732162938268, suffix=, logDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,42393,1732162938268, archiveDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/oldWALs, maxLogs=32 2024-11-21T04:22:20,326 DEBUG [RS:0;fdc0a393186c:39347 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,39347,1732162938035/fdc0a393186c%2C39347%2C1732162938035.1732162940308, exclude list is [], retry=0 2024-11-21T04:22:20,326 DEBUG [RS:1;fdc0a393186c:34363 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,34363,1732162938185/fdc0a393186c%2C34363%2C1732162938185.1732162940303, exclude list is [], retry=0 2024-11-21T04:22:20,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38043,DS-592a7e82-9870-4bba-b814-b684c730bac4,DISK] 2024-11-21T04:22:20,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42055,DS-843822a9-dcff-4e93-b597-54fc35d2cf04,DISK] 2024-11-21T04:22:20,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33643,DS-e1da3505-cf9b-4bb4-8cd9-49cb6837bc8e,DISK] 2024-11-21T04:22:20,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38043,DS-592a7e82-9870-4bba-b814-b684c730bac4,DISK] 2024-11-21T04:22:20,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33643,DS-e1da3505-cf9b-4bb4-8cd9-49cb6837bc8e,DISK] 2024-11-21T04:22:20,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42055,DS-843822a9-dcff-4e93-b597-54fc35d2cf04,DISK] 2024-11-21T04:22:20,391 DEBUG [RS:2;fdc0a393186c:42393 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,42393,1732162938268/fdc0a393186c%2C42393%2C1732162938268.1732162940313, exclude list is [], retry=0 2024-11-21T04:22:20,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38043,DS-592a7e82-9870-4bba-b814-b684c730bac4,DISK] 2024-11-21T04:22:20,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42055,DS-843822a9-dcff-4e93-b597-54fc35d2cf04,DISK] 2024-11-21T04:22:20,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33643,DS-e1da3505-cf9b-4bb4-8cd9-49cb6837bc8e,DISK] 2024-11-21T04:22:20,407 INFO [RS:0;fdc0a393186c:39347 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,39347,1732162938035/fdc0a393186c%2C39347%2C1732162938035.1732162940308 2024-11-21T04:22:20,419 DEBUG [RS:0;fdc0a393186c:39347 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34453:34453),(127.0.0.1/127.0.0.1:44489:44489),(127.0.0.1/127.0.0.1:42959:42959)] 2024-11-21T04:22:20,419 INFO [RS:1;fdc0a393186c:34363 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,34363,1732162938185/fdc0a393186c%2C34363%2C1732162938185.1732162940303 2024-11-21T04:22:20,450 INFO [RS:2;fdc0a393186c:42393 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,42393,1732162938268/fdc0a393186c%2C42393%2C1732162938268.1732162940313 2024-11-21T04:22:20,451 DEBUG [RS:1;fdc0a393186c:34363 {}] wal.AbstractFSWAL(1109): Create 
new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34453:34453),(127.0.0.1/127.0.0.1:44489:44489),(127.0.0.1/127.0.0.1:42959:42959)] 2024-11-21T04:22:20,458 DEBUG [RS:2;fdc0a393186c:42393 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42959:42959),(127.0.0.1/127.0.0.1:44489:44489),(127.0.0.1/127.0.0.1:34453:34453)] 2024-11-21T04:22:20,653 DEBUG [fdc0a393186c:39577 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-21T04:22:20,663 DEBUG [fdc0a393186c:39577 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:22:20,677 DEBUG [fdc0a393186c:39577 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:22:20,677 DEBUG [fdc0a393186c:39577 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:22:20,677 DEBUG [fdc0a393186c:39577 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:22:20,677 DEBUG [fdc0a393186c:39577 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:22:20,677 DEBUG [fdc0a393186c:39577 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:22:20,677 DEBUG [fdc0a393186c:39577 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:22:20,677 INFO [fdc0a393186c:39577 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:22:20,677 INFO [fdc0a393186c:39577 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:22:20,678 INFO [fdc0a393186c:39577 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:22:20,678 DEBUG [fdc0a393186c:39577 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:22:20,687 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:22:20,693 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fdc0a393186c,34363,1732162938185, state=OPENING 2024-11-21T04:22:20,716 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T04:22:20,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:20,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:20,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:20,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:20,726 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, 
state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T04:22:20,727 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T04:22:20,727 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T04:22:20,727 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T04:22:20,727 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T04:22:20,729 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:22:20,911 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T04:22:20,914 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59939, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T04:22:20,931 INFO [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T04:22:20,931 INFO [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T04:22:20,933 INFO [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T04:22:20,938 INFO [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fdc0a393186c%2C34363%2C1732162938185.meta, suffix=.meta, logDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,34363,1732162938185, archiveDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/oldWALs, maxLogs=32 2024-11-21T04:22:20,962 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,34363,1732162938185/fdc0a393186c%2C34363%2C1732162938185.meta.1732162940941.meta, exclude list is [], retry=0 2024-11-21T04:22:20,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38043,DS-592a7e82-9870-4bba-b814-b684c730bac4,DISK] 2024-11-21T04:22:20,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42055,DS-843822a9-dcff-4e93-b597-54fc35d2cf04,DISK] 2024-11-21T04:22:20,971 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33643,DS-e1da3505-cf9b-4bb4-8cd9-49cb6837bc8e,DISK] 2024-11-21T04:22:21,003 INFO [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/WALs/fdc0a393186c,34363,1732162938185/fdc0a393186c%2C34363%2C1732162938185.meta.1732162940941.meta 2024-11-21T04:22:21,003 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34453:34453),(127.0.0.1/127.0.0.1:44489:44489),(127.0.0.1/127.0.0.1:42959:42959)] 2024-11-21T04:22:21,004 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T04:22:21,005 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-21T04:22:21,007 INFO [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:22:21,008 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T04:22:21,011 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T04:22:21,013 INFO [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
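Editor's note: the entries above show hbase:meta being opened with its system coprocessors (AccessControlService and MultiRowMutationEndpoint) registered straight from the table descriptor ("from HTD of hbase:meta"). The same mechanism is available to user tables; the sketch below is illustrative only and is not part of this test — the table name demo_table and family cf are placeholders.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CoprocessorDescriptorSketch {
  public static void main(String[] args) throws Exception {
    // Attach the same endpoint class the log shows being loaded from the HTD of
    // hbase:meta, but to a hypothetical user table descriptor.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table"))                         // placeholder name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    // Coprocessors recorded on the descriptor can be listed back from it.
    td.getCoprocessorDescriptors().forEach(cp -> System.out.println(cp.getClassName()));
  }
}
```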
2024-11-21T04:22:21,025 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T04:22:21,026 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:21,026 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T04:22:21,026 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T04:22:21,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T04:22:21,045 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T04:22:21,045 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:21,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T04:22:21,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T04:22:21,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T04:22:21,051 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:21,053 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T04:22:21,053 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T04:22:21,056 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T04:22:21,056 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:21,057 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T04:22:21,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T04:22:21,059 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T04:22:21,060 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:21,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
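Editor's note: the StoreOpener entries above report the effective compaction settings for each column family of hbase:meta (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5). For reference, a hedged sketch of how such knobs are usually set on an HBase Configuration follows; the property names are the standard compaction keys as best I recall them and should be checked against the documentation for the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror the defaults reported in the log above; key names are assumptions
    // drawn from the standard HBase compaction settings, not from this test.
    conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // one week, in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```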
2024-11-21T04:22:21,061 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T04:22:21,063 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740 2024-11-21T04:22:21,067 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740 2024-11-21T04:22:21,071 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T04:22:21,071 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T04:22:21,072 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T04:22:21,076 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T04:22:21,078 INFO [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60370628, jitterRate=-0.10040754079818726}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T04:22:21,078 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T04:22:21,081 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732162941029Writing region info on filesystem at 1732162941029Initializing all the Stores at 1732162941035 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732162941036 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732162941038 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162941038Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732162941038Cleaning up temporary data from old regions at 1732162941071 (+33 ms)Running coprocessor post-open hooks at 1732162941078 (+7 ms)Region opened successfully at 1732162941081 (+3 ms) 2024-11-21T04:22:21,092 INFO [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732162940899 2024-11-21T04:22:21,111 DEBUG [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T04:22:21,112 INFO [RS_OPEN_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T04:22:21,115 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:22:21,120 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fdc0a393186c,34363,1732162938185, state=OPEN 2024-11-21T04:22:21,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T04:22:21,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T04:22:21,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T04:22:21,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-21T04:22:21,149 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T04:22:21,149 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T04:22:21,149 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T04:22:21,149 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-21T04:22:21,149 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=fdc0a393186c,34363,1732162938185 2024-11-21T04:22:21,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T04:22:21,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=fdc0a393186c,34363,1732162938185 in 422 msec 2024-11-21T04:22:21,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T04:22:21,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.1740 sec 2024-11-21T04:22:21,201 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T04:22:21,202 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T04:22:21,245 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:22:21,251 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:22:21,274 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:21,277 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52459, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:21,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.6300 sec 2024-11-21T04:22:21,319 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732162941319, completionTime=-1 2024-11-21T04:22:21,323 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-21T04:22:21,323 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
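Editor's note: the block above ends with the hbase:meta location being published to ZooKeeper and every ZKWatcher receiving NodeDataChanged for /hbase/meta-region-server. Purely to illustrate that mechanism, here is a small sketch using the plain ZooKeeper client to watch the same znode; the quorum address 127.0.0.1:60527 and the znode path are taken from the log, everything else is assumed.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class MetaZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch changed = new CountDownLatch(1);
    // Quorum address and znode path copied from the log above; adjust for your cluster.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60527", 30000, event -> { });
    Stat stat = new Stat();
    byte[] data = zk.getData("/hbase/meta-region-server", event -> {
      // Fires once on the next change, analogous to the NodeDataChanged events in the log.
      if (event.getType() == Watcher.Event.EventType.NodeDataChanged) {
        changed.countDown();
      }
    }, stat);
    System.out.println("meta znode holds " + data.length + " bytes, version " + stat.getVersion());
    changed.await();   // wait for the next meta relocation
    zk.close();
  }
}
```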
2024-11-21T04:22:21,358 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-21T04:22:21,358 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732163001358 2024-11-21T04:22:21,358 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732163061358 2024-11-21T04:22:21,358 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 34 msec 2024-11-21T04:22:21,359 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-21T04:22:21,367 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,39577,1732162937127-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:21,367 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,39577,1732162937127-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:21,368 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,39577,1732162937127-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:21,369 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-fdc0a393186c:39577, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:21,370 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:21,370 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:21,379 DEBUG [master/fdc0a393186c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T04:22:21,411 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.990sec 2024-11-21T04:22:21,413 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T04:22:21,414 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T04:22:21,415 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T04:22:21,415 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
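Editor's note: several ChoreService entries above report chores such as BalancerChore, CatalogJanitor and HbckChore being enabled with a fixed period. For context, a rough sketch of how a periodic task is expressed with HBase's ScheduledChore/ChoreService is shown below; ScheduledChore is an internal class, so the constructor shown (name, stopper, period) is an assumption based on the released API and may differ in this 4.0.0-alpha snapshot.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws Exception {
    // Minimal Stoppable, standing in for the stopper a master/regionserver would provide.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");                 // thread-name prefix, hypothetical
    ScheduledChore tick = new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");                            // periodic work goes here
      }
    };
    service.scheduleChore(tick);   // corresponds to the "Chore ... is enabled." log entries
    Thread.sleep(3500);
    stopper.stop("demo done");
    service.shutdown();
  }
}
```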
2024-11-21T04:22:21,415 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T04:22:21,416 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,39577,1732162937127-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T04:22:21,416 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,39577,1732162937127-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T04:22:21,443 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T04:22:21,444 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is fdc0a393186c,39577,1732162937127 2024-11-21T04:22:21,446 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4a408711 2024-11-21T04:22:21,447 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T04:22:21,449 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38603, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T04:22:21,455 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T04:22:21,456 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2855f0d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:21,461 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-21T04:22:21,461 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-21T04:22:21,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-21T04:22:21,466 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:22:21,468 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:22:21,469 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:21,469 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:22:21,469 INFO 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-21T04:22:21,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T04:22:21,504 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:22:21,507 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:22:21,507 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:22:21,508 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b6befc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:21,508 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:22:21,508 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:22:21,517 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:22:21,529 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:21,534 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33606, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:22:21,546 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78fbff79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:21,546 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:22:21,574 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:22:21,575 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:21,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741837_1013 (size=349) 2024-11-21T04:22:21,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741837_1013 (size=349) 2024-11-21T04:22:21,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741837_1013 (size=349) 2024-11-21T04:22:21,598 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33354, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:21,601 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 796b8a6daae5bbdc223e78db15304576, NAME => 'hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:22:21,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=fdc0a393186c,39577,1732162937127 2024-11-21T04:22:21,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-11-21T04:22:21,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/test.cache.data in system properties and HBase conf 2024-11-21T04:22:21,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T04:22:21,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir in system properties and HBase conf 2024-11-21T04:22:21,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T04:22:21,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T04:22:21,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T04:22:21,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T04:22:21,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T04:22:21,604 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T04:22:21,604 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T04:22:21,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T04:22:21,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T04:22:21,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T04:22:21,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T04:22:21,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T04:22:21,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/nfs.dump.dir in system properties and HBase conf 2024-11-21T04:22:21,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/java.io.tmpdir in system properties and HBase conf 2024-11-21T04:22:21,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T04:22:21,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/dfs.provided.aliasmap.inmemory.leveldb.dir in system 
properties and HBase conf 2024-11-21T04:22:21,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T04:22:21,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T04:22:21,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741838_1014 (size=36) 2024-11-21T04:22:21,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741838_1014 (size=36) 2024-11-21T04:22:21,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741838_1014 (size=36) 2024-11-21T04:22:21,643 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:21,643 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 796b8a6daae5bbdc223e78db15304576, disabling compactions & flushes 2024-11-21T04:22:21,643 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 2024-11-21T04:22:21,644 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 2024-11-21T04:22:21,644 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. after waiting 0 ms 2024-11-21T04:22:21,644 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 2024-11-21T04:22:21,644 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 2024-11-21T04:22:21,644 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 796b8a6daae5bbdc223e78db15304576: Waiting for close lock at 1732162941643Disabling compacts and flushes for region at 1732162941643Disabling writes for close at 1732162941644 (+1 ms)Writing region close event to WAL at 1732162941644Closed at 1732162941644 2024-11-21T04:22:21,647 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:22:21,659 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1732162941649"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732162941649"}]},"ts":"1732162941649"} 2024-11-21T04:22:21,665 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
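Editor's note: the procedure trace above covers CreateTableProcedure for hbase:acl, whose single column family 'l' is declared with VERSIONS => '1', IN_MEMORY => 'true', BLOCKSIZE => 8192 and no bloom filter. A hedged client-side sketch that builds an equivalent descriptor for a hypothetical user table (acl_demo) follows; system tables such as hbase:acl are created by the master itself, so the table name here is purely illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAclLikeTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family mirroring the 'l' family in the log: 1 version, in-memory,
      // 8 KB blocks, no bloom filter.
      ColumnFamilyDescriptor l = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("l"))
          .setMaxVersions(1)
          .setInMemory(true)
          .setBlocksize(8192)
          .setBloomFilterType(BloomType.NONE)
          .build();
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("acl_demo"))   // hypothetical user table
          .setColumnFamily(l)
          .build());
    }
  }
}
```

On the master side, a request like this is what produces the pid=4 CreateTableProcedure states (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS) seen in the log.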
2024-11-21T04:22:21,668 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:22:21,671 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162941668"}]},"ts":"1732162941668"} 2024-11-21T04:22:21,676 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-21T04:22:21,677 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:22:21,678 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:22:21,678 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:22:21,678 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:22:21,679 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:22:21,679 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:22:21,679 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:22:21,679 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:22:21,679 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:22:21,679 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:22:21,679 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:22:21,681 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=796b8a6daae5bbdc223e78db15304576, ASSIGN}] 2024-11-21T04:22:21,684 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=796b8a6daae5bbdc223e78db15304576, ASSIGN 2024-11-21T04:22:21,686 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=796b8a6daae5bbdc223e78db15304576, ASSIGN; state=OFFLINE, location=fdc0a393186c,42393,1732162938268; forceNewPlan=false, retain=false 2024-11-21T04:22:21,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741839_1015 (size=592039) 2024-11-21T04:22:21,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741839_1015 (size=592039) 2024-11-21T04:22:21,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741839_1015 (size=592039) 2024-11-21T04:22:21,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741840_1016 (size=1663647) 2024-11-21T04:22:21,779 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741840_1016 (size=1663647) 2024-11-21T04:22:21,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741840_1016 (size=1663647) 2024-11-21T04:22:21,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T04:22:21,840 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-21T04:22:21,841 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=796b8a6daae5bbdc223e78db15304576, regionState=OPENING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:22:21,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=796b8a6daae5bbdc223e78db15304576, ASSIGN because future has completed 2024-11-21T04:22:21,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 796b8a6daae5bbdc223e78db15304576, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:22:22,037 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T04:22:22,077 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46291, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T04:22:22,119 INFO [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 2024-11-21T04:22:22,120 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 796b8a6daae5bbdc223e78db15304576, NAME => 'hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576.', STARTKEY => '', ENDKEY => ''} 2024-11-21T04:22:22,120 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. service=AccessControlService 2024-11-21T04:22:22,121 INFO [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
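Editor's note: the assignment flow logged around here moves the new hbase:acl region from OFFLINE through OPENING by dispatching an OpenRegionProcedure to the chosen region server, and records the location in hbase:meta. Once the region is OPEN, a client can read that location back; the sketch below uses RegionLocator against the same hypothetical acl_demo table from the previous example, and is not part of this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("acl_demo"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Prints the encoded region name and the server it was assigned to,
        // i.e. the end state of the OPENING -> OPEN transition in the log.
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```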
2024-11-21T04:22:22,121 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 796b8a6daae5bbdc223e78db15304576 2024-11-21T04:22:22,121 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:22,121 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 796b8a6daae5bbdc223e78db15304576 2024-11-21T04:22:22,122 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 796b8a6daae5bbdc223e78db15304576 2024-11-21T04:22:22,125 INFO [StoreOpener-796b8a6daae5bbdc223e78db15304576-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 796b8a6daae5bbdc223e78db15304576 2024-11-21T04:22:22,132 INFO [StoreOpener-796b8a6daae5bbdc223e78db15304576-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 796b8a6daae5bbdc223e78db15304576 columnFamilyName l 2024-11-21T04:22:22,133 DEBUG [StoreOpener-796b8a6daae5bbdc223e78db15304576-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:22,134 INFO [StoreOpener-796b8a6daae5bbdc223e78db15304576-1 {}] regionserver.HStore(327): Store=796b8a6daae5bbdc223e78db15304576/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:22:22,134 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 796b8a6daae5bbdc223e78db15304576 2024-11-21T04:22:22,136 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/acl/796b8a6daae5bbdc223e78db15304576 2024-11-21T04:22:22,137 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/acl/796b8a6daae5bbdc223e78db15304576 2024-11-21T04:22:22,139 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 796b8a6daae5bbdc223e78db15304576 2024-11-21T04:22:22,139 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 796b8a6daae5bbdc223e78db15304576 2024-11-21T04:22:22,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T04:22:22,149 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 796b8a6daae5bbdc223e78db15304576 2024-11-21T04:22:22,158 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/acl/796b8a6daae5bbdc223e78db15304576/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:22:22,160 INFO [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 796b8a6daae5bbdc223e78db15304576; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70252903, jitterRate=0.04684983193874359}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:22:22,160 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 796b8a6daae5bbdc223e78db15304576 2024-11-21T04:22:22,163 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 796b8a6daae5bbdc223e78db15304576: Running coprocessor pre-open hook at 1732162942122Writing region info on filesystem at 1732162942122Initializing all the Stores at 1732162942124 (+2 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732162942124Cleaning up temporary data from old regions at 1732162942140 (+16 ms)Running coprocessor post-open hooks at 1732162942160 (+20 ms)Region opened successfully at 1732162942163 (+3 ms) 2024-11-21T04:22:22,166 INFO [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., pid=6, masterSystemTime=1732162942037 2024-11-21T04:22:22,174 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=796b8a6daae5bbdc223e78db15304576, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:22:22,177 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 2024-11-21T04:22:22,177 INFO [RS_OPEN_PRIORITY_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 2024-11-21T04:22:22,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 796b8a6daae5bbdc223e78db15304576, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:22:22,209 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T04:22:22,210 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 796b8a6daae5bbdc223e78db15304576, server=fdc0a393186c,42393,1732162938268 in 337 msec 2024-11-21T04:22:22,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T04:22:22,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=796b8a6daae5bbdc223e78db15304576, ASSIGN in 520 msec 2024-11-21T04:22:22,215 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:22:22,216 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162942216"}]},"ts":"1732162942216"} 2024-11-21T04:22:22,229 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-21T04:22:22,233 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:22:22,240 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 775 msec 2024-11-21T04:22:22,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T04:22:22,655 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-21T04:22:22,659 DEBUG [master/fdc0a393186c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T04:22:22,661 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T04:22:22,661 INFO [master/fdc0a393186c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fdc0a393186c,39577,1732162937127-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T04:22:23,632 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:23,729 WARN [Thread-385 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:23,995 INFO [Thread-385 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T04:22:23,995 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-21T04:22:23,997 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T04:22:24,007 INFO [Thread-385 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T04:22:24,007 INFO [Thread-385 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T04:22:24,007 INFO [Thread-385 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T04:22:24,010 INFO [Thread-385 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@753579c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,AVAILABLE} 2024-11-21T04:22:24,011 INFO [Thread-385 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52bb7a0a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-21T04:22:24,013 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T04:22:24,013 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T04:22:24,013 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T04:22:24,019 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:24,029 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@779c58eb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,AVAILABLE} 2024-11-21T04:22:24,030 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fa2fe52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-21T04:22:24,198 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-11-21T04:22:24,198 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-11-21T04:22:24,198 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-21T04:22:24,201 INFO [Thread-385 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-21T04:22:24,280 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T04:22:24,781 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T04:22:25,241 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T04:22:25,287 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@40dd878e{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/java.io.tmpdir/jetty-localhost-37903-hadoop-yarn-common-3_4_1_jar-_-any-7009807586200736786/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-21T04:22:25,288 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6586f66b{HTTP/1.1, (http/1.1)}{localhost:37903} 2024-11-21T04:22:25,288 INFO [Thread-385 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b1ca313{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/java.io.tmpdir/jetty-localhost-34397-hadoop-yarn-common-3_4_1_jar-_-any-15104549409729335589/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-21T04:22:25,288 INFO [Time-limited test {}] server.Server(415): Started @18434ms 2024-11-21T04:22:25,289 INFO [Thread-385 {}] 
server.AbstractConnector(333): Started ServerConnector@32a9ec01{HTTP/1.1, (http/1.1)}{localhost:34397} 2024-11-21T04:22:25,289 INFO [Thread-385 {}] server.Server(415): Started @18435ms 2024-11-21T04:22:25,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741841_1017 (size=5) 2024-11-21T04:22:25,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741841_1017 (size=5) 2024-11-21T04:22:25,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741841_1017 (size=5) 2024-11-21T04:22:26,519 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-21T04:22:26,526 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:26,544 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:22:26,585 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-21T04:22:26,595 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T04:22:26,623 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T04:22:26,623 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T04:22:26,623 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T04:22:26,639 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:26,647 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c2b2bf3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,AVAILABLE} 2024-11-21T04:22:26,647 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fc26d2b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-21T04:22:26,734 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-21T04:22:26,734 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-21T04:22:26,734 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-21T04:22:26,735 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-21T04:22:26,777 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T04:22:26,779 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T04:22:26,782 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-21T04:22:26,799 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T04:22:27,055 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T04:22:27,076 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@410c57a8{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/java.io.tmpdir/jetty-localhost-44891-hadoop-yarn-common-3_4_1_jar-_-any-8823421399687380035/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-21T04:22:27,077 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b981737{HTTP/1.1, (http/1.1)}{localhost:44891} 2024-11-21T04:22:27,077 INFO [Time-limited test {}] server.Server(415): Started @20223ms 2024-11-21T04:22:27,354 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 
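The repeated "Unable to initialize FileSignerSecretProvider" warnings above come from Hadoop's HTTP AuthenticationFilter: the secret file /home/jenkins/hadoop-http-auth-signature-secret is not readable, so each web endpoint falls back to a random signing secret. The warning is harmless for a mini cluster; a hedged sketch of how a test could silence it by pointing the filter at a readable secret file (the file contents and path below are placeholders):

import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.hadoop.conf.Configuration;

public class HttpAuthSecretSketch {
  // Sketch: create a throwaway secret file and point the Hadoop HTTP
  // authentication filter at it so FileSignerSecretProvider can initialize.
  public static void configure(Configuration conf) throws Exception {
    Path secret = Files.createTempFile("http-auth-signature", ".secret");
    Files.writeString(secret, "not-a-real-secret");  // placeholder content
    conf.set("hadoop.http.authentication.signature.secret.file",
        secret.toAbsolutePath().toString());
  }
}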
2024-11-21T04:22:27,358 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:27,375 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-21T04:22:27,376 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T04:22:27,380 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T04:22:27,381 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T04:22:27,381 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T04:22:27,383 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T04:22:27,383 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b506169{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,AVAILABLE} 2024-11-21T04:22:27,384 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f039685{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-21T04:22:27,452 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-21T04:22:27,452 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-21T04:22:27,452 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-21T04:22:27,452 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-21T04:22:27,466 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T04:22:27,474 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-21T04:22:27,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:22:27,561 INFO 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-21T04:22:27,562 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T04:22:27,562 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T04:22:27,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-21T04:22:27,564 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-21T04:22:27,565 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:22:27,565 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-21T04:22:27,566 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-21T04:22:27,566 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-21T04:22:27,566 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:22:27,566 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-21T04:22:27,567 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T04:22:27,567 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-21T04:22:27,567 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T04:22:27,567 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-21T04:22:27,632 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the 
scope "Singleton" 2024-11-21T04:22:27,639 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cb31a64{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/java.io.tmpdir/jetty-localhost-38251-hadoop-yarn-common-3_4_1_jar-_-any-16052445668159153959/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-21T04:22:27,641 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9fc67aa{HTTP/1.1, (http/1.1)}{localhost:38251} 2024-11-21T04:22:27,642 INFO [Time-limited test {}] server.Server(415): Started @20787ms 2024-11-21T04:22:27,692 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-21T04:22:27,694 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:22:27,740 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=722, OpenFileDescriptor=779, MaxFileDescriptor=1048576, SystemLoadAverage=406, ProcessCount=13, AvailableMemoryMB=4563 2024-11-21T04:22:27,743 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=722 is superior to 500 2024-11-21T04:22:27,756 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T04:22:27,764 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is fdc0a393186c,39577,1732162937127 2024-11-21T04:22:27,764 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3d934a3f 2024-11-21T04:22:27,766 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T04:22:27,785 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58998, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T04:22:27,787 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:22:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-21T04:22:27,794 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:22:27,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: 
namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 7 2024-11-21T04:22:27,795 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:27,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T04:22:27,799 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:22:27,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741842_1018 (size=406) 2024-11-21T04:22:27,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741842_1018 (size=406) 2024-11-21T04:22:27,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741842_1018 (size=406) 2024-11-21T04:22:27,880 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 20d3cc01808951528b07429f11b02230, NAME => 'testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:22:27,890 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f685c181f7e21d99de2f5f58de3e2af4, NAME => 'testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:22:27,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T04:22:27,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741843_1019 (size=67) 2024-11-21T04:22:27,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741843_1019 (size=67) 2024-11-21T04:22:27,921 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741843_1019 (size=67) 2024-11-21T04:22:27,923 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:27,923 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 20d3cc01808951528b07429f11b02230, disabling compactions & flushes 2024-11-21T04:22:27,923 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:27,923 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:27,924 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. after waiting 0 ms 2024-11-21T04:22:27,924 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:27,924 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:27,924 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 20d3cc01808951528b07429f11b02230: Waiting for close lock at 1732162947923Disabling compacts and flushes for region at 1732162947923Disabling writes for close at 1732162947924 (+1 ms)Writing region close event to WAL at 1732162947924Closed at 1732162947924 2024-11-21T04:22:27,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741844_1020 (size=67) 2024-11-21T04:22:27,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741844_1020 (size=67) 2024-11-21T04:22:27,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741844_1020 (size=67) 2024-11-21T04:22:27,928 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:27,928 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing f685c181f7e21d99de2f5f58de3e2af4, disabling compactions & flushes 2024-11-21T04:22:27,928 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 
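The ResourceChecker entry above names the test being run, snapshot.TestSecureExportSnapshot#testExportWithTargetName, which exercises exporting a snapshot under a different target name. A hedged sketch of the equivalent ExportSnapshot invocation; the snapshot name, target name and destination path are placeholders rather than values taken from this log, and the option names assume the tool's documented flags:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only: "-target" asks ExportSnapshot to write the copy under a
    // different snapshot name, which is the behaviour this test exercises.
    int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(), new String[] {
        "-snapshot", "source-snapshot-name",              // placeholder
        "-target", "exported-snapshot-name",              // placeholder
        "-copy-to", "hdfs://localhost:44067/export-test", // placeholder destination path
        "-overwrite"
    });
    System.exit(rc);
  }
}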
2024-11-21T04:22:27,928 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 2024-11-21T04:22:27,928 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. after waiting 0 ms 2024-11-21T04:22:27,928 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 2024-11-21T04:22:27,928 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 2024-11-21T04:22:27,928 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for f685c181f7e21d99de2f5f58de3e2af4: Waiting for close lock at 1732162947928Disabling compacts and flushes for region at 1732162947928Disabling writes for close at 1732162947928Writing region close event to WAL at 1732162947928Closed at 1732162947928 2024-11-21T04:22:27,931 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:22:27,931 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732162947931"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732162947931"}]},"ts":"1732162947931"} 2024-11-21T04:22:27,932 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732162947931"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732162947931"}]},"ts":"1732162947931"} 2024-11-21T04:22:27,992 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
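The MetaTableAccessor entry above ("Added 2 regions to meta.") reflects a pre-split table: two regions with boundaries '' -> '1' and '1' -> '', matching the descriptor logged for testtb-testExportWithTargetName. A minimal sketch of how a client would issue an equivalent create with the standard Admin API (connection handling elided; only the names visible in the log are reused):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTableSketch {
  // Sketch: one 'cf' family, one replica, split at row key "1", yielding the two
  // regions ('' -> '1', '1' -> '') recorded in hbase:meta above.
  public static void create(Admin admin) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
        .setRegionReplication(1)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)
            .build())
        .build();
    byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
    admin.createTable(desc, splitKeys);  // blocks until the CreateTableProcedure finishes
  }
}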
2024-11-21T04:22:27,994 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:22:27,995 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162947995"}]},"ts":"1732162947995"} 2024-11-21T04:22:27,999 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-21T04:22:28,000 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:22:28,003 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:22:28,003 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:22:28,004 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:22:28,004 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:22:28,004 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:22:28,004 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:22:28,004 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:22:28,004 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:22:28,004 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:22:28,004 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:22:28,004 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=20d3cc01808951528b07429f11b02230, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f685c181f7e21d99de2f5f58de3e2af4, ASSIGN}] 2024-11-21T04:22:28,007 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f685c181f7e21d99de2f5f58de3e2af4, ASSIGN 2024-11-21T04:22:28,008 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=20d3cc01808951528b07429f11b02230, ASSIGN 2024-11-21T04:22:28,010 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f685c181f7e21d99de2f5f58de3e2af4, ASSIGN; state=OFFLINE, location=fdc0a393186c,42393,1732162938268; forceNewPlan=false, retain=false 2024-11-21T04:22:28,011 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=20d3cc01808951528b07429f11b02230, ASSIGN; state=OFFLINE, location=fdc0a393186c,39347,1732162938035; forceNewPlan=false, retain=false 2024-11-21T04:22:28,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T04:22:28,161 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-21T04:22:28,161 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=f685c181f7e21d99de2f5f58de3e2af4, regionState=OPENING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:22:28,162 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=20d3cc01808951528b07429f11b02230, regionState=OPENING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:22:28,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f685c181f7e21d99de2f5f58de3e2af4, ASSIGN because future has completed 2024-11-21T04:22:28,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure f685c181f7e21d99de2f5f58de3e2af4, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:22:28,189 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=20d3cc01808951528b07429f11b02230, ASSIGN because future has completed 2024-11-21T04:22:28,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 20d3cc01808951528b07429f11b02230, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:22:28,367 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T04:22:28,391 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 2024-11-21T04:22:28,391 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => f685c181f7e21d99de2f5f58de3e2af4, NAME => 'testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T04:22:28,392 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. service=AccessControlService 2024-11-21T04:22:28,392 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
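The recurring "Checking to see if procedure is done pid=7" entries are the master answering the client's polling for the create-table procedure result while the ASSIGN sub-procedures above run. A sketch of the asynchronous client-side view of that wait, assuming the standard Admin API (the timeout value is illustrative):

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class CreateTableWaitSketch {
  // Sketch: createTableAsync submits the CreateTableProcedure and returns a Future;
  // waiting on that Future is what corresponds to the "is procedure done" RPCs above.
  public static void createAndWait(Admin admin, TableDescriptor desc, byte[][] splits)
      throws Exception {
    Future<Void> f = admin.createTableAsync(desc, splits);
    f.get(60, TimeUnit.SECONDS);  // illustrative timeout
  }
}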
2024-11-21T04:22:28,392 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:28,392 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:28,393 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:28,393 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:28,393 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60985, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T04:22:28,415 INFO [StoreOpener-f685c181f7e21d99de2f5f58de3e2af4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:28,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T04:22:28,424 INFO [StoreOpener-f685c181f7e21d99de2f5f58de3e2af4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f685c181f7e21d99de2f5f58de3e2af4 columnFamilyName cf 2024-11-21T04:22:28,424 DEBUG [StoreOpener-f685c181f7e21d99de2f5f58de3e2af4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:28,426 INFO [StoreOpener-f685c181f7e21d99de2f5f58de3e2af4-1 {}] regionserver.HStore(327): Store=f685c181f7e21d99de2f5f58de3e2af4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:22:28,426 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:28,428 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:28,429 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:28,430 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:28,430 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:28,434 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:28,435 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 20d3cc01808951528b07429f11b02230, NAME => 'testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T04:22:28,435 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:28,435 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. service=AccessControlService 2024-11-21T04:22:28,435 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T04:22:28,436 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:28,436 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:28,436 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:28,436 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:28,447 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:22:28,448 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened f685c181f7e21d99de2f5f58de3e2af4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65688623, jitterRate=-0.02116324007511139}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:22:28,449 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:28,450 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for f685c181f7e21d99de2f5f58de3e2af4: Running coprocessor pre-open hook at 1732162948393Writing region info on filesystem at 1732162948393Initializing all the Stores at 1732162948395 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162948395Cleaning up temporary data from old regions at 1732162948430 (+35 ms)Running coprocessor post-open hooks at 1732162948449 (+19 ms)Region opened successfully at 1732162948450 (+1 ms) 2024-11-21T04:22:28,453 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4., pid=10, masterSystemTime=1732162948359 2024-11-21T04:22:28,460 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 
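The entries above show region f685c181f7e21d99de2f5f58de3e2af4 fully opened on fdc0a393186c,42393; its sibling region completes shortly after on a different server. A small sketch of how a test could confirm the final layout through the client API once both assignments are done (connection acquisition elided):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLayoutSketch {
  // Sketch: list every region of the table with its hosting server, mirroring
  // the OPEN/openSeqNum rows written to hbase:meta in the log.
  public static void dumpLayout(Connection conn) throws Exception {
    try (RegionLocator locator =
        conn.getRegionLocator(TableName.valueOf("testtb-testExportWithTargetName"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}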
2024-11-21T04:22:28,460 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 2024-11-21T04:22:28,464 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=f685c181f7e21d99de2f5f58de3e2af4, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:22:28,465 INFO [StoreOpener-20d3cc01808951528b07429f11b02230-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:28,468 INFO [StoreOpener-20d3cc01808951528b07429f11b02230-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 20d3cc01808951528b07429f11b02230 columnFamilyName cf 2024-11-21T04:22:28,469 DEBUG [StoreOpener-20d3cc01808951528b07429f11b02230-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:28,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure f685c181f7e21d99de2f5f58de3e2af4, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:22:28,471 INFO [StoreOpener-20d3cc01808951528b07429f11b02230-1 {}] regionserver.HStore(327): Store=20d3cc01808951528b07429f11b02230/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:22:28,471 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:28,473 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230 2024-11-21T04:22:28,479 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230 2024-11-21T04:22:28,480 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 20d3cc01808951528b07429f11b02230 
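The CompactionConfiguration entry above is the store printing its effective compaction tuning. For reference, a sketch mapping the logged figures back to the configuration keys that control them; the key names are the standard HBase ones, and the values set here simply restate what the log already reports rather than recommending anything:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration asLoggedAbove() {
    Configuration conf = HBaseConfiguration.create();
    // minFilesToCompact:3 / maxFilesToCompact:10 in the log
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // ratio 1.2 / off-peak ratio 5.0
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // major period 604800000 ms (7 days) with 0.5 jitter
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    return conf;
  }
}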
2024-11-21T04:22:28,480 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:28,486 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:28,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T04:22:28,499 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure f685c181f7e21d99de2f5f58de3e2af4, server=fdc0a393186c,42393,1732162938268 in 298 msec 2024-11-21T04:22:28,499 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:22:28,501 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 20d3cc01808951528b07429f11b02230; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62505644, jitterRate=-0.06859332323074341}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:22:28,501 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:28,505 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 20d3cc01808951528b07429f11b02230: Running coprocessor pre-open hook at 1732162948436Writing region info on filesystem at 1732162948436Initializing all the Stores at 1732162948446 (+10 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162948446Cleaning up temporary data from old regions at 1732162948480 (+34 ms)Running coprocessor post-open hooks at 1732162948501 (+21 ms)Region opened successfully at 1732162948501 2024-11-21T04:22:28,507 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230., pid=11, masterSystemTime=1732162948366 2024-11-21T04:22:28,511 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f685c181f7e21d99de2f5f58de3e2af4, ASSIGN in 495 msec 2024-11-21T04:22:28,513 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 
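For context, the column family echoed in the region open journals above ({NAME => 'cf', VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536 B (64KB)', ...}) and the two-region layout split at row key '1' correspond roughly to a client-side table creation like the sketch below. This is a hypothetical illustration against the public Admin/TableDescriptorBuilder API, not the test's actual setup code; configuration and connection handling are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();            // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                      .setMaxVersions(1)                              // VERSIONS => '1'
                      .setBloomFilterType(BloomType.ROW)              // BLOOMFILTER => 'ROW'
                      .setBlocksize(65536)                            // BLOCKSIZE => 65536
                      .build());
          // Pre-split at row key "1" so the table starts with the two regions seen in the log.
          admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
        }
      }
    }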
2024-11-21T04:22:28,513 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:28,514 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=20d3cc01808951528b07429f11b02230, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:22:28,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 20d3cc01808951528b07429f11b02230, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:22:28,538 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-11-21T04:22:28,538 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 20d3cc01808951528b07429f11b02230, server=fdc0a393186c,39347,1732162938035 in 339 msec 2024-11-21T04:22:28,546 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-21T04:22:28,546 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=20d3cc01808951528b07429f11b02230, ASSIGN in 535 msec 2024-11-21T04:22:28,547 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:22:28,547 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162948547"}]},"ts":"1732162948547"} 2024-11-21T04:22:28,553 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-21T04:22:28,555 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:22:28,561 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-21T04:22:28,576 DEBUG [PEWorker-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:22:28,590 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:28,592 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60041, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:28,599 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:22:28,599 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:22:28,600 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:28,625 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52881, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-21T04:22:28,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:22:28,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:28,631 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36189, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-21T04:22:28,633 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-21T04:22:28,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-21T04:22:28,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-21T04:22:28,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:28,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-21T04:22:28,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:28,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-21T04:22:28,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:28,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:22:28,723 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-21T04:22:28,724 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-21T04:22:28,725 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-21T04:22:28,725 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-21T04:22:28,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 937 msec 2024-11-21T04:22:28,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T04:22:28,933 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-21T04:22:28,934 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-11-21T04:22:28,935 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:22:28,947 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-11-21T04:22:28,948 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:22:28,948 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 2024-11-21T04:22:28,952 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-21T04:22:28,970 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-21T04:22:28,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732162948970 (current time:1732162948970). 
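The snapshot request logged above ({ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }) is the master-side view of a client Admin#snapshot call, which drives the SnapshotProcedure states (SNAPSHOT_PREPARE through SNAPSHOT_COMPLETE_SNAPSHOT) traced in the entries that follow. A minimal, hypothetical sketch of issuing such a request (connection setup assumed; snapshot and table names taken from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // assumes client config on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush-type snapshot; the master then registers and executes
          // the SnapshotProcedure whose state transitions appear in the log below.
          admin.snapshot("emptySnaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"));
        }
      }
    }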
2024-11-21T04:22:28,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:22:28,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-21T04:22:28,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:22:28,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f112e14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:28,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:22:28,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:22:28,977 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:22:28,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:22:28,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:22:28,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22db5cc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:28,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:22:28,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:22:28,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:28,981 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59016, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:22:28,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@423db164, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:28,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:22:28,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:22:28,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:28,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48218, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:28,993 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:22:28,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:22:29,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:29,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:29,002 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T04:22:29,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dd1a593, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:29,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:22:29,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:22:29,012 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:22:29,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:22:29,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:22:29,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@131b3a3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:29,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:22:29,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:22:29,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:29,016 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59026, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:22:29,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bb79a2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:29,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:22:29,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:22:29,023 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:29,025 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48234, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T04:22:29,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:22:29,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:29,031 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47896, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:29,034 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:22:29,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:22:29,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:29,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:29,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-21T04:22:29,039 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:22:29,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:22:29,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-21T04:22:29,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-21T04:22:29,070 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:22:29,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T04:22:29,078 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:22:29,101 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:22:29,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741845_1021 (size=167) 2024-11-21T04:22:29,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741845_1021 (size=167) 2024-11-21T04:22:29,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741845_1021 (size=167) 2024-11-21T04:22:29,175 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:22:29,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 20d3cc01808951528b07429f11b02230}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f685c181f7e21d99de2f5f58de3e2af4}] 2024-11-21T04:22:29,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T04:22:29,198 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:29,198 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:29,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-21T04:22:29,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-21T04:22:29,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 2024-11-21T04:22:29,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:29,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 20d3cc01808951528b07429f11b02230: 2024-11-21T04:22:29,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for f685c181f7e21d99de2f5f58de3e2af4: 2024-11-21T04:22:29,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. for emptySnaptb0-testExportWithTargetName completed. 2024-11-21T04:22:29,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. for emptySnaptb0-testExportWithTargetName completed. 2024-11-21T04:22:29,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-21T04:22:29,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T04:22:29,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:22:29,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:22:29,403 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-21T04:22:29,403 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:22:29,403 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:22:29,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741846_1022 (size=70) 2024-11-21T04:22:29,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741846_1022 (size=70) 2024-11-21T04:22:29,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 
2024-11-21T04:22:29,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741846_1022 (size=70) 2024-11-21T04:22:29,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-21T04:22:29,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-21T04:22:29,465 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:29,465 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:29,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f685c181f7e21d99de2f5f58de3e2af4 in 290 msec 2024-11-21T04:22:29,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741847_1023 (size=70) 2024-11-21T04:22:29,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741847_1023 (size=70) 2024-11-21T04:22:29,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741847_1023 (size=70) 2024-11-21T04:22:29,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 
2024-11-21T04:22:29,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-21T04:22:29,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-21T04:22:29,490 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:29,491 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:29,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-21T04:22:29,505 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 20d3cc01808951528b07429f11b02230 in 315 msec 2024-11-21T04:22:29,505 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:22:29,508 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:22:29,513 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:22:29,513 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-21T04:22:29,516 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-21T04:22:29,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741848_1024 (size=549) 2024-11-21T04:22:29,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741848_1024 (size=549) 2024-11-21T04:22:29,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741848_1024 (size=549) 2024-11-21T04:22:29,575 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:22:29,596 
INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:22:29,598 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-21T04:22:29,602 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:22:29,602 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-21T04:22:29,608 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 555 msec 2024-11-21T04:22:29,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T04:22:29,703 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-21T04:22:29,717 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='0f8fc4dc1874360bd1839b1f46af8fbd1', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230., hostname=fdc0a393186c,39347,1732162938035, seqNum=2] 2024-11-21T04:22:29,719 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='16ee50c8872aebe10a0c3454a064fe819', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:22:29,722 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='296f0374a5eb7a3c9bf1946e87435f193', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:22:29,727 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:29,728 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='314054e4caa28029f08b27e5fe09e0151', locateType=CURRENT is 
[region=testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:22:29,729 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:29,733 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55738, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:29,735 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47908, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:29,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39347 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:22:29,747 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42393 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:22:29,751 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-21T04:22:29,756 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-21T04:22:29,757 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:29,758 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:22:29,761 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-21T04:22:29,776 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-21T04:22:29,786 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-21T04:22:29,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-21T04:22:29,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732162949791 (current time:1732162949791). 
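The "writing data to region ... with WAL disabled" warnings above come from test writes that skip the write-ahead log before the snaptb0 snapshot is requested. A hypothetical client-side equivalent is sketched below; the row key, qualifier, and value are invented for illustration, and only the SKIP_WAL durability mirrors the logged behaviour.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteWithoutWal {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // assumes client config on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
          Put put = new Put(Bytes.toBytes("row-0"))          // hypothetical row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);            // matches the "WAL disabled" warning in the log
          table.put(put);
        }
      }
    }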
2024-11-21T04:22:29,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:22:29,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-21T04:22:29,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:22:29,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@740ef660, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:29,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:22:29,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:22:29,796 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:22:29,796 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:22:29,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:22:29,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62607756, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:29,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:22:29,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:22:29,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:29,799 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59040, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:22:29,800 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@283c1e3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:29,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:22:29,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:22:29,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:29,803 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48238, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:29,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:22:29,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:22:29,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:29,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:29,806 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T04:22:29,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28a49313, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:29,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:22:29,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:22:29,809 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:22:29,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:22:29,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:22:29,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74b3e1de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:29,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:22:29,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:22:29,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:29,812 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59046, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:22:29,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f0d2ca0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:29,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:22:29,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:22:29,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:29,819 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48254, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T04:22:29,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:22:29,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:29,827 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47922, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:29,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:22:29,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:22:29,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:29,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:29,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-21T04:22:29,832 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:22:29,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:22:29,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-21T04:22:29,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-21T04:22:29,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-21T04:22:29,848 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:22:29,851 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:22:29,857 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:22:29,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741849_1025 (size=162) 2024-11-21T04:22:29,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741849_1025 (size=162) 2024-11-21T04:22:29,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741849_1025 (size=162) 2024-11-21T04:22:29,891 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:22:29,891 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 20d3cc01808951528b07429f11b02230}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f685c181f7e21d99de2f5f58de3e2af4}] 2024-11-21T04:22:29,894 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:29,894 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:29,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-21T04:22:30,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-21T04:22:30,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-21T04:22:30,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:30,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 
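The SnapshotProcedure (pid=15) and its per-region SnapshotRegionProcedure children above are started on the master in response to a client snapshot request. A minimal client-side sketch of that request, assuming the synchronous Admin API (the exact call used by the test is not shown in this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side procedure finishes; the repeated
          // "Checking to see if procedure is done pid=15" lines above reflect
          // the client polling for that completion.
          admin.snapshot("snaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"),
              SnapshotType.FLUSH);
        }
      }
    }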
2024-11-21T04:22:30,056 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing f685c181f7e21d99de2f5f58de3e2af4 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-21T04:22:30,056 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 20d3cc01808951528b07429f11b02230 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-21T04:22:30,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/.tmp/cf/25285f0218ee479e92b6acb6eae37440 is 71, key is 00216a6798984a586b6bd2b977cb4aaf/cf:q/1732162949735/Put/seqid=0 2024-11-21T04:22:30,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/.tmp/cf/25470a9c035f4307bc77875bd3ac2c80 is 71, key is 18f887a4f682781c3228ca43c54396eb/cf:q/1732162949747/Put/seqid=0 2024-11-21T04:22:30,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-21T04:22:30,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741851_1027 (size=8258) 2024-11-21T04:22:30,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741851_1027 (size=8258) 2024-11-21T04:22:30,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741851_1027 (size=8258) 2024-11-21T04:22:30,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741850_1026 (size=5356) 2024-11-21T04:22:30,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741850_1026 (size=5356) 2024-11-21T04:22:30,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741850_1026 (size=5356) 2024-11-21T04:22:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-21T04:22:30,579 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/.tmp/cf/25470a9c035f4307bc77875bd3ac2c80 2024-11-21T04:22:30,593 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/.tmp/cf/25285f0218ee479e92b6acb6eae37440 2024-11-21T04:22:30,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/.tmp/cf/25285f0218ee479e92b6acb6eae37440 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/cf/25285f0218ee479e92b6acb6eae37440 2024-11-21T04:22:30,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/.tmp/cf/25470a9c035f4307bc77875bd3ac2c80 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/cf/25470a9c035f4307bc77875bd3ac2c80 2024-11-21T04:22:30,677 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/cf/25470a9c035f4307bc77875bd3ac2c80, entries=46, sequenceid=6, filesize=8.1 K 2024-11-21T04:22:30,677 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/cf/25285f0218ee479e92b6acb6eae37440, entries=4, sequenceid=6, filesize=5.2 K 2024-11-21T04:22:30,688 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 20d3cc01808951528b07429f11b02230 in 632ms, sequenceid=6, compaction requested=false 2024-11-21T04:22:30,688 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for f685c181f7e21d99de2f5f58de3e2af4 in 627ms, sequenceid=6, compaction requested=false 2024-11-21T04:22:30,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-21T04:22:30,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-21T04:22:30,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for f685c181f7e21d99de2f5f58de3e2af4: 
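A FLUSH-type snapshot first forces each region's memstore to disk, which is what the HRegion, DefaultStoreFlusher and HRegionFileSystem lines above show (write to .tmp/cf/..., then commit into cf/...). The same flush can also be requested directly through the Admin API; a small sketch, with only the table name taken from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks every region of the table to write its memstore out as an HFile
          // under <region>/.tmp/<family>/ and commit it into <region>/<family>/.
          admin.flush(TableName.valueOf("testtb-testExportWithTargetName"));
        }
      }
    }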
2024-11-21T04:22:30,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 20d3cc01808951528b07429f11b02230: 2024-11-21T04:22:30,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. for snaptb0-testExportWithTargetName completed. 2024-11-21T04:22:30,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. for snaptb0-testExportWithTargetName completed. 2024-11-21T04:22:30,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-21T04:22:30,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-21T04:22:30,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:22:30,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:22:30,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/cf/25285f0218ee479e92b6acb6eae37440] hfiles 2024-11-21T04:22:30,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/cf/25470a9c035f4307bc77875bd3ac2c80] hfiles 2024-11-21T04:22:30,693 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/cf/25470a9c035f4307bc77875bd3ac2c80 for snapshot=snaptb0-testExportWithTargetName 2024-11-21T04:22:30,693 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/cf/25285f0218ee479e92b6acb6eae37440 for snapshot=snaptb0-testExportWithTargetName 2024-11-21T04:22:30,728 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741852_1028 (size=109) 2024-11-21T04:22:30,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741852_1028 (size=109) 2024-11-21T04:22:30,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741852_1028 (size=109) 2024-11-21T04:22:30,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:30,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-21T04:22:30,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-21T04:22:30,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:30,735 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:30,740 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 20d3cc01808951528b07429f11b02230 in 845 msec 2024-11-21T04:22:30,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741853_1029 (size=109) 2024-11-21T04:22:30,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741853_1029 (size=109) 2024-11-21T04:22:30,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741853_1029 (size=109) 2024-11-21T04:22:30,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 
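With the per-region manifests written and the region procedures completing above, the consolidation and completion steps that follow publish the snapshot under .hbase-snapshot/. A hedged sketch of how a client can enumerate completed snapshots afterwards (typical API usage assumed, not taken from the test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Completed snapshots live under <hbase.rootdir>/.hbase-snapshot/<name>;
          // snaptb0-testExportWithTargetName should appear here once pid=15 succeeds.
          for (SnapshotDescription sd : admin.listSnapshots()) {
            System.out.println(sd.getName() + " -> " + sd.getTableNameAsString());
          }
        }
      }
    }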
2024-11-21T04:22:30,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-21T04:22:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-21T04:22:30,749 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:30,749 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:30,754 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-11-21T04:22:30,755 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f685c181f7e21d99de2f5f58de3e2af4 in 860 msec 2024-11-21T04:22:30,755 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:22:30,757 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:22:30,759 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:22:30,759 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-21T04:22:30,761 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-21T04:22:30,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741854_1030 (size=627) 2024-11-21T04:22:30,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741854_1030 (size=627) 2024-11-21T04:22:30,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741854_1030 (size=627) 2024-11-21T04:22:30,860 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:22:30,886 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:22:30,887 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-21T04:22:30,890 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:22:30,890 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-21T04:22:30,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 1.0560 sec 2024-11-21T04:22:30,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-21T04:22:30,984 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-21T04:22:30,984 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162950984 2024-11-21T04:22:30,984 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44067, tgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162950984, rawTgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162950984, srcFsUri=hdfs://localhost:44067, srcDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:22:31,043 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44067, inputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:22:31,043 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162950984, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162950984/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-21T04:22:31,048 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status 
and integrity. 2024-11-21T04:22:31,062 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162950984/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-21T04:22:31,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741855_1031 (size=162) 2024-11-21T04:22:31,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741855_1031 (size=162) 2024-11-21T04:22:31,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741855_1031 (size=162) 2024-11-21T04:22:31,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741856_1032 (size=627) 2024-11-21T04:22:31,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741856_1032 (size=627) 2024-11-21T04:22:31,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741856_1032 (size=627) 2024-11-21T04:22:31,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741857_1033 (size=154) 2024-11-21T04:22:31,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741857_1033 (size=154) 2024-11-21T04:22:31,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741857_1033 (size=154) 2024-11-21T04:22:31,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:22:31,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:22:31,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:22:32,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-8034578745613398394.jar 2024-11-21T04:22:32,562 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:22:32,563 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:22:32,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-9647100405964283995.jar 2024-11-21T04:22:32,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:22:32,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:22:32,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:22:32,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:22:32,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:22:32,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:22:32,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T04:22:32,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T04:22:32,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T04:22:32,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 
2024-11-21T04:22:32,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T04:22:32,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T04:22:32,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T04:22:32,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T04:22:32,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T04:22:32,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T04:22:32,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T04:22:32,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:22:32,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:22:32,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:22:32,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:22:32,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-21T04:22:32,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:22:32,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:22:32,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741858_1034 (size=131440) 2024-11-21T04:22:32,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741858_1034 (size=131440) 2024-11-21T04:22:32,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741858_1034 (size=131440) 2024-11-21T04:22:32,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741859_1035 (size=4188619) 2024-11-21T04:22:32,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741859_1035 (size=4188619) 2024-11-21T04:22:32,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741859_1035 (size=4188619) 2024-11-21T04:22:33,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741860_1036 (size=1323991) 2024-11-21T04:22:33,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741860_1036 (size=1323991) 2024-11-21T04:22:33,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741860_1036 (size=1323991) 2024-11-21T04:22:33,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741861_1037 (size=903734) 2024-11-21T04:22:33,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741861_1037 (size=903734) 2024-11-21T04:22:33,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741861_1037 (size=903734) 2024-11-21T04:22:33,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741862_1038 (size=8360083) 2024-11-21T04:22:33,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741862_1038 (size=8360083) 2024-11-21T04:22:33,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741862_1038 (size=8360083) 2024-11-21T04:22:33,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741863_1039 (size=1877034) 
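The long run of "For class X, using jar Y" lines, followed by the block reports for the uploaded jars, is the job-setup step that ships HBase and its shaded third-party jars with the MapReduce job. In user code this is normally a single TableMapReduceUtil helper call; a minimal sketch (the job name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch"); // name is illustrative
        // Resolves the jar providing each required class and adds it to the job's
        // distributed cache; that resolution is what the DEBUG lines above record.
        TableMapReduceUtil.addDependencyJars(job);
        System.out.println("tmpjars = " + job.getConfiguration().get("tmpjars"));
      }
    }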
2024-11-21T04:22:33,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741863_1039 (size=1877034) 2024-11-21T04:22:33,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741863_1039 (size=1877034) 2024-11-21T04:22:33,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741864_1040 (size=77835) 2024-11-21T04:22:33,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741864_1040 (size=77835) 2024-11-21T04:22:33,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741864_1040 (size=77835) 2024-11-21T04:22:33,896 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:22:33,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741865_1041 (size=30949) 2024-11-21T04:22:33,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741865_1041 (size=30949) 2024-11-21T04:22:33,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741865_1041 (size=30949) 2024-11-21T04:22:34,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741866_1042 (size=6424740) 2024-11-21T04:22:34,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741866_1042 (size=6424740) 2024-11-21T04:22:34,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741866_1042 (size=6424740) 2024-11-21T04:22:34,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741867_1043 (size=1597327) 2024-11-21T04:22:34,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741867_1043 (size=1597327) 2024-11-21T04:22:34,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741867_1043 (size=1597327) 2024-11-21T04:22:34,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741868_1044 (size=4695811) 2024-11-21T04:22:34,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741868_1044 (size=4695811) 2024-11-21T04:22:34,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741868_1044 (size=4695811) 2024-11-21T04:22:34,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741869_1045 (size=232957) 2024-11-21T04:22:34,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741869_1045 
(size=232957) 2024-11-21T04:22:34,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741869_1045 (size=232957) 2024-11-21T04:22:34,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741870_1046 (size=127628) 2024-11-21T04:22:34,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741870_1046 (size=127628) 2024-11-21T04:22:34,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741870_1046 (size=127628) 2024-11-21T04:22:34,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741871_1047 (size=20406) 2024-11-21T04:22:34,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741871_1047 (size=20406) 2024-11-21T04:22:34,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741871_1047 (size=20406) 2024-11-21T04:22:34,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741872_1048 (size=5175431) 2024-11-21T04:22:34,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741872_1048 (size=5175431) 2024-11-21T04:22:34,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741872_1048 (size=5175431) 2024-11-21T04:22:34,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741873_1049 (size=217634) 2024-11-21T04:22:34,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741873_1049 (size=217634) 2024-11-21T04:22:34,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741873_1049 (size=217634) 2024-11-21T04:22:34,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741874_1050 (size=1832290) 2024-11-21T04:22:34,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741874_1050 (size=1832290) 2024-11-21T04:22:34,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741874_1050 (size=1832290) 2024-11-21T04:22:34,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741875_1051 (size=322274) 2024-11-21T04:22:34,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741875_1051 (size=322274) 2024-11-21T04:22:34,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741875_1051 (size=322274) 2024-11-21T04:22:34,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to 
blk_1073741876_1052 (size=440656) 2024-11-21T04:22:34,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741876_1052 (size=440656) 2024-11-21T04:22:34,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741876_1052 (size=440656) 2024-11-21T04:22:34,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741877_1053 (size=503880) 2024-11-21T04:22:34,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741877_1053 (size=503880) 2024-11-21T04:22:34,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741877_1053 (size=503880) 2024-11-21T04:22:34,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741878_1054 (size=29229) 2024-11-21T04:22:34,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741878_1054 (size=29229) 2024-11-21T04:22:34,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741878_1054 (size=29229) 2024-11-21T04:22:34,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741879_1055 (size=24096) 2024-11-21T04:22:34,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741879_1055 (size=24096) 2024-11-21T04:22:34,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741879_1055 (size=24096) 2024-11-21T04:22:34,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741880_1056 (size=111872) 2024-11-21T04:22:34,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741880_1056 (size=111872) 2024-11-21T04:22:34,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741880_1056 (size=111872) 2024-11-21T04:22:34,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741881_1057 (size=45609) 2024-11-21T04:22:34,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741881_1057 (size=45609) 2024-11-21T04:22:34,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741881_1057 (size=45609) 2024-11-21T04:22:34,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741882_1058 (size=136454) 2024-11-21T04:22:34,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741882_1058 (size=136454) 2024-11-21T04:22:34,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to 
blk_1073741882_1058 (size=136454) 2024-11-21T04:22:34,990 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-21T04:22:34,998 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-21T04:22:35,015 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T04:22:35,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741883_1059 (size=342) 2024-11-21T04:22:35,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741883_1059 (size=342) 2024-11-21T04:22:35,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741883_1059 (size=342) 2024-11-21T04:22:35,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741884_1060 (size=15) 2024-11-21T04:22:35,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741884_1060 (size=15) 2024-11-21T04:22:35,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741884_1060 (size=15) 2024-11-21T04:22:36,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741885_1061 (size=303735) 2024-11-21T04:22:36,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741885_1061 (size=303735) 2024-11-21T04:22:36,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741885_1061 (size=303735) 2024-11-21T04:22:36,671 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T04:22:36,671 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T04:22:37,285 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0001_000001 (auth:SIMPLE) from 127.0.0.1:50742 2024-11-21T04:22:37,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-21T04:22:37,560 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-21T04:22:46,119 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0001_000001 (auth:SIMPLE) from 127.0.0.1:44286 2024-11-21T04:22:46,348 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T04:22:46,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741886_1062 (size=349385) 2024-11-21T04:22:46,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741886_1062 (size=349385) 2024-11-21T04:22:46,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741886_1062 (size=349385) 2024-11-21T04:22:48,636 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0001_000001 (auth:SIMPLE) from 127.0.0.1:37040 2024-11-21T04:22:54,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741887_1063 (size=8258) 2024-11-21T04:22:54,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741887_1063 (size=8258) 2024-11-21T04:22:54,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741887_1063 (size=8258) 2024-11-21T04:22:54,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741888_1064 (size=5356) 2024-11-21T04:22:54,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741888_1064 (size=5356) 2024-11-21T04:22:54,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741888_1064 (size=5356) 2024-11-21T04:22:54,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741889_1065 (size=17419) 2024-11-21T04:22:54,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741889_1065 (size=17419) 2024-11-21T04:22:54,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741889_1065 (size=17419) 2024-11-21T04:22:54,576 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete 
returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0001/container_1732162945431_0001_01_000002/launch_container.sh] 2024-11-21T04:22:54,576 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0001/container_1732162945431_0001_01_000002/container_tokens] 2024-11-21T04:22:54,576 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0001/container_1732162945431_0001_01_000002/sysfs] 2024-11-21T04:22:54,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741890_1066 (size=464) 2024-11-21T04:22:54,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741890_1066 (size=464) 2024-11-21T04:22:54,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741890_1066 (size=464) 2024-11-21T04:22:54,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741891_1067 (size=17419) 2024-11-21T04:22:54,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741891_1067 (size=17419) 2024-11-21T04:22:54,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741891_1067 (size=17419) 2024-11-21T04:22:54,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741892_1068 (size=349385) 2024-11-21T04:22:54,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741892_1068 (size=349385) 2024-11-21T04:22:54,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741892_1068 (size=349385) 2024-11-21T04:22:54,710 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0001_000001 (auth:SIMPLE) from 127.0.0.1:53466 2024-11-21T04:22:56,039 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T04:22:56,040 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-21T04:22:56,048 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: testExportWithTargetName
2024-11-21T04:22:56,048 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot
2024-11-21T04:22:56,049 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state
2024-11-21T04:22:56,049 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithTargetName
2024-11-21T04:22:56,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo
2024-11-21T04:22:56,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest
2024-11-21T04:22:56,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162950984/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162950984/.hbase-snapshot/testExportWithTargetName
2024-11-21T04:22:56,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162950984/.hbase-snapshot/testExportWithTargetName/.snapshotinfo
2024-11-21T04:22:56,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162950984/.hbase-snapshot/testExportWithTargetName/data.manifest
2024-11-21T04:22:56,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName
2024-11-21T04:22:56,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName
2024-11-21T04:22:56,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18
2024-11-21T04:22:56,076 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162976075"}]},"ts":"1732162976075"}
2024-11-21T04:22:56,078 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta
2024-11-21T04:22:56,078 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING
2024-11-21T04:22:56,081 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized
subprocedures=[{pid=19, ppid=18, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-21T04:22:56,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=20d3cc01808951528b07429f11b02230, UNASSIGN}, {pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f685c181f7e21d99de2f5f58de3e2af4, UNASSIGN}] 2024-11-21T04:22:56,091 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=20d3cc01808951528b07429f11b02230, UNASSIGN 2024-11-21T04:22:56,091 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f685c181f7e21d99de2f5f58de3e2af4, UNASSIGN 2024-11-21T04:22:56,094 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=20d3cc01808951528b07429f11b02230, regionState=CLOSING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:22:56,094 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=f685c181f7e21d99de2f5f58de3e2af4, regionState=CLOSING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:22:56,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f685c181f7e21d99de2f5f58de3e2af4, UNASSIGN because future has completed 2024-11-21T04:22:56,098 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:22:56,098 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure f685c181f7e21d99de2f5f58de3e2af4, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:22:56,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=20d3cc01808951528b07429f11b02230, UNASSIGN because future has completed 2024-11-21T04:22:56,101 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:22:56,101 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 20d3cc01808951528b07429f11b02230, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:22:56,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-21T04:22:56,255 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] 
handler.UnassignRegionHandler(122): Close f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:56,255 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:22:56,256 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:56,256 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:22:56,256 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1722): Closing f685c181f7e21d99de2f5f58de3e2af4, disabling compactions & flushes 2024-11-21T04:22:56,256 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 2024-11-21T04:22:56,256 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 20d3cc01808951528b07429f11b02230, disabling compactions & flushes 2024-11-21T04:22:56,256 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 2024-11-21T04:22:56,256 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:56,256 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. after waiting 0 ms 2024-11-21T04:22:56,256 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 2024-11-21T04:22:56,257 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:56,257 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. after waiting 0 ms 2024-11-21T04:22:56,257 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 
2024-11-21T04:22:56,262 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:22:56,262 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:22:56,265 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:22:56,265 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:22:56,265 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4. 2024-11-21T04:22:56,265 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230. 2024-11-21T04:22:56,265 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 20d3cc01808951528b07429f11b02230: Waiting for close lock at 1732162976256Running coprocessor pre-close hooks at 1732162976256Disabling compacts and flushes for region at 1732162976256Disabling writes for close at 1732162976257 (+1 ms)Writing region close event to WAL at 1732162976258 (+1 ms)Running coprocessor post-close hooks at 1732162976263 (+5 ms)Closed at 1732162976265 (+2 ms) 2024-11-21T04:22:56,266 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1676): Region close journal for f685c181f7e21d99de2f5f58de3e2af4: Waiting for close lock at 1732162976256Running coprocessor pre-close hooks at 1732162976256Disabling compacts and flushes for region at 1732162976256Disabling writes for close at 1732162976256Writing region close event to WAL at 1732162976258 (+2 ms)Running coprocessor post-close hooks at 1732162976263 (+5 ms)Closed at 1732162976265 (+2 ms) 2024-11-21T04:22:56,273 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 20d3cc01808951528b07429f11b02230 2024-11-21T04:22:56,274 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=20d3cc01808951528b07429f11b02230, regionState=CLOSED 2024-11-21T04:22:56,274 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(157): Closed f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:56,275 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=f685c181f7e21d99de2f5f58de3e2af4, regionState=CLOSED 2024-11-21T04:22:56,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 20d3cc01808951528b07429f11b02230, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:22:56,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure f685c181f7e21d99de2f5f58de3e2af4, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:22:56,281 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=20 2024-11-21T04:22:56,281 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=20, state=SUCCESS, hasLock=false; CloseRegionProcedure 20d3cc01808951528b07429f11b02230, server=fdc0a393186c,39347,1732162938035 in 177 msec 2024-11-21T04:22:56,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-11-21T04:22:56,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; CloseRegionProcedure f685c181f7e21d99de2f5f58de3e2af4, server=fdc0a393186c,42393,1732162938268 in 181 msec 2024-11-21T04:22:56,284 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=20d3cc01808951528b07429f11b02230, UNASSIGN in 193 msec 2024-11-21T04:22:56,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=19 2024-11-21T04:22:56,286 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f685c181f7e21d99de2f5f58de3e2af4, UNASSIGN in 194 msec 2024-11-21T04:22:56,290 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-21T04:22:56,290 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 206 msec 2024-11-21T04:22:56,292 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162976292"}]},"ts":"1732162976292"} 2024-11-21T04:22:56,295 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-21T04:22:56,295 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-21T04:22:56,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 230 msec 2024-11-21T04:22:56,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-21T04:22:56,392 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-21T04:22:56,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] 
master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-11-21T04:22:56,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-21T04:22:56,404 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-21T04:22:56,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-21T04:22:56,408 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=24, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-21T04:22:56,412 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-21T04:22:56,416 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230 2024-11-21T04:22:56,416 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:56,420 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/recovered.edits] 2024-11-21T04:22:56,420 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/recovered.edits] 2024-11-21T04:22:56,429 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/cf/25285f0218ee479e92b6acb6eae37440 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/cf/25285f0218ee479e92b6acb6eae37440 2024-11-21T04:22:56,429 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/cf/25470a9c035f4307bc77875bd3ac2c80 to 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/cf/25470a9c035f4307bc77875bd3ac2c80 2024-11-21T04:22:56,433 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4/recovered.edits/9.seqid 2024-11-21T04:22:56,434 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230/recovered.edits/9.seqid 2024-11-21T04:22:56,434 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/f685c181f7e21d99de2f5f58de3e2af4 2024-11-21T04:22:56,434 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithTargetName/20d3cc01808951528b07429f11b02230 2024-11-21T04:22:56,434 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-21T04:22:56,438 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=24, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-21T04:22:56,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34363 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-21T04:22:56,449 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-21T04:22:56,455 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-21T04:22:56,457 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=24, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-21T04:22:56,457 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 
2024-11-21T04:22:56,458 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732162976457"}]},"ts":"9223372036854775807"}
2024-11-21T04:22:56,458 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732162976457"}]},"ts":"9223372036854775807"}
2024-11-21T04:22:56,461 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META
2024-11-21T04:22:56,462 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 20d3cc01808951528b07429f11b02230, NAME => 'testtb-testExportWithTargetName,,1732162947786.20d3cc01808951528b07429f11b02230.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f685c181f7e21d99de2f5f58de3e2af4, NAME => 'testtb-testExportWithTargetName,1,1732162947786.f685c181f7e21d99de2f5f58de3e2af4.', STARTKEY => '1', ENDKEY => ''}]
2024-11-21T04:22:56,462 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted.
2024-11-21T04:22:56,462 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732162976462"}]},"ts":"9223372036854775807"}
2024-11-21T04:22:56,465 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META
2024-11-21T04:22:56,467 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=24, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-11-21T04:22:56,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-11-21T04:22:56,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-11-21T04:22:56,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-11-21T04:22:56,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-11-21T04:22:56,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 69 msec
2024-11-21T04:22:56,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-11-21T04:22:56,478 DEBUG [Time-limited
test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:22:56,479 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-11-21T04:22:56,479 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-21T04:22:56,479 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-11-21T04:22:56,479 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-11-21T04:22:56,479 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-21T04:22:56,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:22:56,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:22:56,479 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-11-21T04:22:56,479 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-21T04:22:56,479 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-21T04:22:56,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-21T04:22:56,482 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-21T04:22:56,483 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-21T04:22:56,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-21T04:22:56,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-21T04:22:56,507 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-21T04:22:56,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-21T04:22:56,538 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=777 (was 722) Potentially hanging thread: 
HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-524535358_1 at /127.0.0.1:43442 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: 
RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44015 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 114206) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45831 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (625843813) connection to localhost/127.0.0.1:44015 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) 
java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:45414 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (625843813) connection to localhost/127.0.0.1:37599 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1260 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37599 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (625843813) connection to localhost/127.0.0.1:45831 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:56062 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
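Most of the stacks in this "Potentially hanging thread" dump are not threads stuck holding work: they are executor-pool workers and Netty event loops that are simply idle, parked in LinkedBlockingQueue.take()/poll() underneath ThreadPoolExecutor.getTask(), or blocked in an epoll/selector wait. The minimal Java sketch below is illustrative only (it is not part of the test code, and the class and thread names are made up); it shows how a plain JDK thread pool produces exactly this park / LinkedBlockingQueue.take / ThreadPoolExecutor.getTask / runWorker pattern in a thread dump once its task has finished.

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class IdleWorkerDump {
        public static void main(String[] args) throws Exception {
            // A single core worker; after the submitted no-op completes it goes back to
            // LinkedBlockingQueue.take() inside ThreadPoolExecutor.getTask() and parks there,
            // which is the idle state the resource checker flags as "potentially hanging".
            ExecutorService pool = Executors.newFixedThreadPool(1, r -> {
                Thread t = new Thread(r, "demo-pool-worker");
                t.setDaemon(true);
                return t;
            });
            pool.submit(() -> { /* no-op task */ });
            Thread.sleep(200); // let the worker become idle again

            // Dump the worker's stack, similar in spirit to what jstack or the test's
            // resource checker prints for each live thread.
            for (ThreadInfo info : ManagementFactory.getThreadMXBean().dumpAllThreads(false, false)) {
                if (info.getThreadName().startsWith("demo-pool-worker")) {
                    System.out.println("Potentially hanging thread: " + info.getThreadName());
                    for (StackTraceElement frame : info.getStackTrace()) {
                        System.out.println("    " + frame);
                    }
                }
            }
            pool.shutdownNow();
        }
    }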
Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ContainersLauncher #0
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=810 (was 779) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=620 (was 406) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 13) - ProcessCount LEAK? -, AvailableMemoryMB=1981 (was 4563)
2024-11-21T04:22:56,539 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=777 is superior to 500
2024-11-21T04:22:56,558 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=777, OpenFileDescriptor=810, MaxFileDescriptor=1048576, SystemLoadAverage=620, ProcessCount=19, AvailableMemoryMB=1978
2024-11-21T04:22:56,559 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=777 is superior to 500
2024-11-21T04:22:56,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-21T04:22:56,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl
2024-11-21T04:22:56,564 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION
2024-11-21T04:22:56,564 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-21T04:22:56,565 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 25
2024-11-21T04:22:56,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25
2024-11-21T04:22:56,566 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-21T04:22:56,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741893_1069 (size=404)
2024-11-21T04:22:56,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741893_1069 (size=404)
2024-11-21T04:22:56,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741893_1069 (size=404)
2024-11-21T04:22:56,578 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 72a2e6b9ecc445dc6f2caad25d1a0335, NAME => 'testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a
2024-11-21T04:22:56,578 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 6e16a083394339512ddf4220646112fd, NAME => 'testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a
2024-11-21T04:22:56,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741895_1071 (size=65)
2024-11-21T04:22:56,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741895_1071 (size=65)
2024-11-21T04:22:56,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741895_1071 (size=65)
2024-11-21T04:22:56,589 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-21T04:22:56,589 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 6e16a083394339512ddf4220646112fd, disabling compactions & flushes
2024-11-21T04:22:56,589 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.
2024-11-21T04:22:56,589 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.
2024-11-21T04:22:56,589 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. after waiting 0 ms
2024-11-21T04:22:56,589 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.
2024-11-21T04:22:56,589 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.
2024-11-21T04:22:56,590 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 6e16a083394339512ddf4220646112fd: Waiting for close lock at 1732162976589Disabling compacts and flushes for region at 1732162976589Disabling writes for close at 1732162976589Writing region close event to WAL at 1732162976589Closed at 1732162976589
2024-11-21T04:22:56,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741894_1070 (size=65)
2024-11-21T04:22:56,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741894_1070 (size=65)
2024-11-21T04:22:56,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741894_1070 (size=65)
2024-11-21T04:22:56,594 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-21T04:22:56,594 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 72a2e6b9ecc445dc6f2caad25d1a0335, disabling compactions & flushes
2024-11-21T04:22:56,594 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.
2024-11-21T04:22:56,594 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.
2024-11-21T04:22:56,594 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. after waiting 0 ms
2024-11-21T04:22:56,594 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.
2024-11-21T04:22:56,594 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.
2024-11-21T04:22:56,594 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 72a2e6b9ecc445dc6f2caad25d1a0335: Waiting for close lock at 1732162976594Disabling compacts and flushes for region at 1732162976594Disabling writes for close at 1732162976594Writing region close event to WAL at 1732162976594Closed at 1732162976594
2024-11-21T04:22:56,595 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META
2024-11-21T04:22:56,596 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732162976595"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732162976595"}]},"ts":"1732162976595"}
2024-11-21T04:22:56,596 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732162976595"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732162976595"}]},"ts":"1732162976595"}
2024-11-21T04:22:56,599 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta.
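The entries above show pid=25 (CreateTableProcedure for testtb-testExportWithResetTtl) stepping through CREATE_TABLE_PRE_OPERATION, CREATE_TABLE_WRITE_FS_LAYOUT and CREATE_TABLE_ADD_TO_META, initializing the two regions ('' to '1' and '1' to '') and writing them to hbase:meta. As a rough client-side equivalent, the sketch below uses the standard HBase 2.x Admin API to create a one-family table pre-split at row key '1'; it is an illustration under assumptions, not the code the test actually runs, and it presumes an hbase-site.xml on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes cluster config on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // One column family 'cf' with a single version, defaults otherwise,
                // mirroring the descriptor printed in the create request above.
                TableDescriptor table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
                    .setRegionReplication(1)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("cf"))
                        .setMaxVersions(1)
                        .build())
                    .build();
                // Pre-split at '1' so the table starts with the two regions seen in the log.
                admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
            }
        }
    }

On the client side, createTable() blocks until the master reports the procedure as finished, which matches the repeated "Checking to see if procedure is done pid=25" polling entries interleaved through this part of the log.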
2024-11-21T04:22:56,601 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:22:56,601 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162976601"}]},"ts":"1732162976601"} 2024-11-21T04:22:56,603 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-21T04:22:56,604 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:22:56,605 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:22:56,605 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:22:56,605 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:22:56,605 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:22:56,605 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:22:56,605 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:22:56,605 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:22:56,605 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:22:56,605 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:22:56,605 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:22:56,606 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=72a2e6b9ecc445dc6f2caad25d1a0335, ASSIGN}, {pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6e16a083394339512ddf4220646112fd, ASSIGN}] 2024-11-21T04:22:56,607 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6e16a083394339512ddf4220646112fd, ASSIGN 2024-11-21T04:22:56,607 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=72a2e6b9ecc445dc6f2caad25d1a0335, ASSIGN 2024-11-21T04:22:56,609 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=72a2e6b9ecc445dc6f2caad25d1a0335, ASSIGN; state=OFFLINE, location=fdc0a393186c,39347,1732162938035; forceNewPlan=false, retain=false 2024-11-21T04:22:56,609 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=27, ppid=25, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6e16a083394339512ddf4220646112fd, ASSIGN; state=OFFLINE, location=fdc0a393186c,34363,1732162938185; forceNewPlan=false, retain=false 2024-11-21T04:22:56,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-21T04:22:56,759 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-21T04:22:56,760 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=6e16a083394339512ddf4220646112fd, regionState=OPENING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:22:56,760 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=72a2e6b9ecc445dc6f2caad25d1a0335, regionState=OPENING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:22:56,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=72a2e6b9ecc445dc6f2caad25d1a0335, ASSIGN because future has completed 2024-11-21T04:22:56,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:22:56,764 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6e16a083394339512ddf4220646112fd, ASSIGN because future has completed 2024-11-21T04:22:56,764 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6e16a083394339512ddf4220646112fd, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:22:56,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-21T04:22:56,925 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 2024-11-21T04:22:56,925 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7752): Opening region: {ENCODED => 72a2e6b9ecc445dc6f2caad25d1a0335, NAME => 'testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T04:22:56,925 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. 2024-11-21T04:22:56,925 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 
service=AccessControlService 2024-11-21T04:22:56,925 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7752): Opening region: {ENCODED => 6e16a083394339512ddf4220646112fd, NAME => 'testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T04:22:56,926 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:22:56,926 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. service=AccessControlService 2024-11-21T04:22:56,926 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:56,926 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:22:56,926 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:56,926 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:56,926 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7794): checking encryption for 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:56,926 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:56,926 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7797): checking classloading for 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:56,927 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7794): checking encryption for 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:56,927 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7797): checking classloading for 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:56,929 INFO [StoreOpener-6e16a083394339512ddf4220646112fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 
6e16a083394339512ddf4220646112fd 2024-11-21T04:22:56,929 INFO [StoreOpener-72a2e6b9ecc445dc6f2caad25d1a0335-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:56,932 INFO [StoreOpener-6e16a083394339512ddf4220646112fd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e16a083394339512ddf4220646112fd columnFamilyName cf 2024-11-21T04:22:56,932 INFO [StoreOpener-72a2e6b9ecc445dc6f2caad25d1a0335-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72a2e6b9ecc445dc6f2caad25d1a0335 columnFamilyName cf 2024-11-21T04:22:56,932 DEBUG [StoreOpener-72a2e6b9ecc445dc6f2caad25d1a0335-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:56,932 DEBUG [StoreOpener-6e16a083394339512ddf4220646112fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:56,933 INFO [StoreOpener-6e16a083394339512ddf4220646112fd-1 {}] regionserver.HStore(327): Store=6e16a083394339512ddf4220646112fd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:22:56,933 INFO [StoreOpener-72a2e6b9ecc445dc6f2caad25d1a0335-1 {}] regionserver.HStore(327): Store=72a2e6b9ecc445dc6f2caad25d1a0335/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:22:56,933 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1038): replaying wal for 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:56,933 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1038): replaying wal for 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:56,934 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:56,934 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd 2024-11-21T04:22:56,935 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd 2024-11-21T04:22:56,935 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:56,935 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1048): stopping wal replay for 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:56,935 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1048): stopping wal replay for 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:56,935 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1060): Cleaning up temporary data for 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:56,935 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1060): Cleaning up temporary data for 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:56,937 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1093): writing seq id for 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:56,937 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1093): writing seq id for 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:56,940 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:22:56,940 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:22:56,940 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1114): Opened 6e16a083394339512ddf4220646112fd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60701444, jitterRate=-0.09547799825668335}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:22:56,940 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1114): Opened 72a2e6b9ecc445dc6f2caad25d1a0335; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66122116, jitterRate=-0.014703691005706787}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:22:56,940 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:56,940 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:56,941 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1006): Region open journal for 6e16a083394339512ddf4220646112fd: Running coprocessor pre-open hook at 1732162976927Writing region info on filesystem at 1732162976927Initializing all the Stores at 1732162976928 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162976928Cleaning up temporary data from old regions at 1732162976935 (+7 ms)Running coprocessor post-open hooks at 1732162976940 (+5 ms)Region opened successfully at 1732162976941 (+1 ms) 2024-11-21T04:22:56,941 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1006): Region open journal for 72a2e6b9ecc445dc6f2caad25d1a0335: Running coprocessor pre-open hook at 1732162976927Writing region info on filesystem at 1732162976927Initializing all the Stores at 1732162976928 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162976928Cleaning up temporary data from old regions at 1732162976935 (+7 ms)Running coprocessor post-open hooks at 1732162976940 (+5 ms)Region opened successfully at 1732162976941 (+1 ms) 2024-11-21T04:22:56,942 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335., pid=28, masterSystemTime=1732162976916 2024-11-21T04:22:56,942 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd., pid=29, masterSystemTime=1732162976917 2024-11-21T04:22:56,944 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2266): Finished post open deploy task for 
testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 2024-11-21T04:22:56,944 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 2024-11-21T04:22:56,945 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=72a2e6b9ecc445dc6f2caad25d1a0335, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:22:56,945 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. 2024-11-21T04:22:56,945 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. 2024-11-21T04:22:56,947 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=6e16a083394339512ddf4220646112fd, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:22:56,947 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=28, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:22:56,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6e16a083394339512ddf4220646112fd, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:22:56,951 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=26 2024-11-21T04:22:56,951 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=26, state=SUCCESS, hasLock=false; OpenRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335, server=fdc0a393186c,39347,1732162938035 in 185 msec 2024-11-21T04:22:56,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=27 2024-11-21T04:22:56,953 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=72a2e6b9ecc445dc6f2caad25d1a0335, ASSIGN in 345 msec 2024-11-21T04:22:56,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=27, state=SUCCESS, hasLock=false; OpenRegionProcedure 6e16a083394339512ddf4220646112fd, server=fdc0a393186c,34363,1732162938185 in 186 msec 2024-11-21T04:22:56,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-11-21T04:22:56,956 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6e16a083394339512ddf4220646112fd, ASSIGN in 347 msec 2024-11-21T04:22:56,957 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute 
state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:22:56,957 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162976957"}]},"ts":"1732162976957"} 2024-11-21T04:22:56,959 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-21T04:22:56,961 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:22:56,961 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-21T04:22:56,965 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-21T04:22:56,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:22:56,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:22:56,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:22:56,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:22:56,986 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:22:56,985 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:22:56,986 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:22:56,986 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:22:56,988 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 425 msec 2024-11-21T04:22:57,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=25 2024-11-21T04:22:57,193 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-21T04:22:57,193 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-21T04:22:57,193 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:22:57,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-21T04:22:57,198 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:22:57,198 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 2024-11-21T04:22:57,198 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T04:22:57,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-21T04:22:57,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732162977203 (current time:1732162977203). 2024-11-21T04:22:57,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:22:57,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-21T04:22:57,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:22:57,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79f888ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:57,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:22:57,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:22:57,206 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:22:57,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:22:57,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: 
"572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:22:57,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bdc940d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:57,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:22:57,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:22:57,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:57,208 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46844, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:22:57,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@264ab986, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:57,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:22:57,212 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:22:57,212 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:57,214 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41604, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:57,218 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
2024-11-21T04:22:57,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:22:57,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:57,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:57,219 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:22:57,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74318ec1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:57,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:22:57,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:22:57,236 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:22:57,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:22:57,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:22:57,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b06658, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:57,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:22:57,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:22:57,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:57,239 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46866, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:22:57,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f84a304, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:57,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:22:57,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:22:57,244 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:57,245 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41618, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:57,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:22:57,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:57,251 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43648, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:57,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
2024-11-21T04:22:57,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:22:57,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:57,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:57,254 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:22:57,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-21T04:22:57,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
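The records just above end with the master reading the table ACL and deciding "No existing snapshot, attempting snapshot..." for emptySnaptb0-testExportWithResetTtl. On the client side such a request is normally issued through the Admin API; a minimal sketch, reusing the snapshot and table names from the log and assuming an already open Admin handle:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public final class SnapshotRequestSketch {
        // Issues a FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the request above.
        static void takeEmptySnapshot(Admin admin) throws IOException {
            TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
            admin.snapshot(new SnapshotDescription(
                "emptySnaptb0-testExportWithResetTtl", table, SnapshotType.FLUSH));
        }
    }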
2024-11-21T04:22:57,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-21T04:22:57,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-21T04:22:57,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-21T04:22:57,261 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:22:57,264 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:22:57,269 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:22:57,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741896_1072 (size=161) 2024-11-21T04:22:57,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741896_1072 (size=161) 2024-11-21T04:22:57,347 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:22:57,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335}, {pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e16a083394339512ddf4220646112fd}] 2024-11-21T04:22:57,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741896_1072 (size=161) 2024-11-21T04:22:57,355 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:57,355 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:57,372 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-21T04:22:57,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=31 2024-11-21T04:22:57,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 2024-11-21T04:22:57,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=32 2024-11-21T04:22:57,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.HRegion(2603): Flush status journal for 72a2e6b9ecc445dc6f2caad25d1a0335: 2024-11-21T04:22:57,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-21T04:22:57,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-21T04:22:57,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:22:57,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. 2024-11-21T04:22:57,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:22:57,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.HRegion(2603): Flush status journal for 6e16a083394339512ddf4220646112fd: 2024-11-21T04:22:57,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-21T04:22:57,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-21T04:22:57,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:22:57,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:22:57,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741897_1073 (size=68) 2024-11-21T04:22:57,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741897_1073 (size=68) 2024-11-21T04:22:57,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741897_1073 (size=68) 2024-11-21T04:22:57,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741898_1074 (size=68) 2024-11-21T04:22:57,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741898_1074 (size=68) 2024-11-21T04:22:57,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741898_1074 (size=68) 2024-11-21T04:22:57,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-21T04:22:57,560 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-21T04:22:57,561 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-21T04:22:57,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-21T04:22:57,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-21T04:22:57,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 
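Both SnapshotRegionProcedure tasks above store region-info with no hfile references (the regions are still empty), and once the manifest is consolidated a few records further below, the snapshot becomes visible to clients. A hedged verification sketch using the public Admin API:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public final class SnapshotListSketch {
        // Returns true if a snapshot with the given name is visible to the client,
        // e.g. "emptySnaptb0-testExportWithResetTtl" once pid=30 completes.
        static boolean snapshotExists(Admin admin, String name) throws IOException {
            for (SnapshotDescription sd : admin.listSnapshots()) {
                if (name.equals(sd.getName())) {
                    return true;
                }
            }
            return false;
        }
    }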
2024-11-21T04:22:57,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-21T04:22:57,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=31 2024-11-21T04:22:57,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:57,924 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:57,927 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. 2024-11-21T04:22:57,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=32 2024-11-21T04:22:57,928 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335 in 578 msec 2024-11-21T04:22:57,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=32 2024-11-21T04:22:57,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:57,928 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:57,932 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=30 2024-11-21T04:22:57,932 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6e16a083394339512ddf4220646112fd in 582 msec 2024-11-21T04:22:57,932 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:22:57,933 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:22:57,934 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:22:57,935 DEBUG [PEWorker-2 {}] 
snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-21T04:22:57,936 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-21T04:22:58,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741899_1075 (size=543) 2024-11-21T04:22:58,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741899_1075 (size=543) 2024-11-21T04:22:58,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741899_1075 (size=543) 2024-11-21T04:22:58,028 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:22:58,041 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:22:58,042 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-21T04:22:58,045 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:22:58,045 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-21T04:22:58,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 790 msec 2024-11-21T04:22:58,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-21T04:22:58,403 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-21T04:22:58,412 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='0e76cd61377ddc0b581951b649147297b', locateType=CURRENT is 
[region=testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335., hostname=fdc0a393186c,39347,1732162938035, seqNum=2] 2024-11-21T04:22:58,414 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='1a4b0b8e619dd220697ecd4a8fd32cd66', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:22:58,416 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='25d3cd9dab8d560bbbea14a94248e96c1', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:22:58,417 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='30e5e9fb33b3c6dc52a6b37affe970503', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:22:58,418 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='4785182aaddc1f63dc43db02c8863e9ca', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:22:58,419 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='5f83fd61b19f9fcf70454925402c40730', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:22:58,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39347 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:22:58,426 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:22:58,429 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T04:22:58,432 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-21T04:22:58,432 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 
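The two "writing data to region ... with WAL disabled" records above correspond to the test loading rows with durability turned off, which is why the server warns that data may be lost on a crash. A minimal sketch of a client write with the same durability setting; the row key and value below are placeholders, while the cf:q column is taken from the log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class SkipWalPutSketch {
        static void writeWithoutWal(Connection connection) throws IOException {
            try (Table table = connection.getTable(
                    TableName.valueOf("testtb-testExportWithResetTtl"))) {
                Put put = new Put(Bytes.toBytes("row-0"))
                    .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
                // SKIP_WAL reproduces the "with WAL disabled" condition logged by HRegion(8528).
                put.setDurability(Durability.SKIP_WAL);
                table.put(put);
            }
        }
    }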
2024-11-21T04:22:58,433 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:22:58,435 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T04:22:58,441 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T04:22:58,450 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T04:22:58,454 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-21T04:22:58,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732162978454 (current time:1732162978454). 2024-11-21T04:22:58,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:22:58,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-21T04:22:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:22:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5eaf64e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:22:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:22:58,456 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:22:58,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:22:58,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:22:58,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41e0bc3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-21T04:22:58,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:22:58,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:22:58,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:58,459 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46888, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:22:58,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@255fca6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:58,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:22:58,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:22:58,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:58,463 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41620, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:58,464 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
2024-11-21T04:22:58,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:22:58,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:58,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:58,465 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:22:58,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51ef16b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:58,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:22:58,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:22:58,467 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:22:58,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:22:58,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:22:58,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5205b3e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:58,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:22:58,468 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:22:58,468 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:58,469 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:22:58,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e80fab4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:22:58,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:22:58,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:22:58,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:58,473 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41624, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:58,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:22:58,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:22:58,477 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43658, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:22:58,479 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
2024-11-21T04:22:58,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:22:58,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:58,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:22:58,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-21T04:22:58,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:22:58,482 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
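The second snapshot request (snaptb0-testExportWithResetTtl) is again of type FLUSH, so the region servers flush their memstores before capturing store files, as the "Flushing ... column families" records further below show. A comparable explicit flush can be requested through the Admin API; a minimal sketch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class FlushTableSketch {
        // Forces memstore flushes for every region of the table, comparable to the
        // per-region flushes a FLUSH-type snapshot performs on each region server.
        static void flushTable(Admin admin) throws IOException {
            admin.flush(TableName.valueOf("testtb-testExportWithResetTtl"));
        }
    }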
2024-11-21T04:22:58,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-21T04:22:58,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-21T04:22:58,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-21T04:22:58,484 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:22:58,486 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:22:58,491 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:22:58,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741900_1076 (size=156) 2024-11-21T04:22:58,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741900_1076 (size=156) 2024-11-21T04:22:58,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741900_1076 (size=156) 2024-11-21T04:22:58,513 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:22:58,513 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335}, {pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e16a083394339512ddf4220646112fd}] 2024-11-21T04:22:58,516 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:58,517 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:58,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-21T04:22:58,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=35 2024-11-21T04:22:58,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. 2024-11-21T04:22:58,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=34 2024-11-21T04:22:58,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 2024-11-21T04:22:58,671 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2902): Flushing 6e16a083394339512ddf4220646112fd 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-21T04:22:58,671 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2902): Flushing 72a2e6b9ecc445dc6f2caad25d1a0335 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-21T04:22:58,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/.tmp/cf/fe3ed9aee01445de8ff6e33fee5ab8c7 is 71, key is 071b5ffa64b15289384b71e4bd28114a/cf:q/1732162978421/Put/seqid=0 2024-11-21T04:22:58,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/.tmp/cf/0df5ac04556a4b9481d45ee9b32c7bbc is 71, key is 15eb57cd6d337954a20cdfd1fa70da84/cf:q/1732162978426/Put/seqid=0 2024-11-21T04:22:58,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741901_1077 (size=5288) 2024-11-21T04:22:58,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741901_1077 (size=5288) 2024-11-21T04:22:58,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741901_1077 (size=5288) 2024-11-21T04:22:58,716 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/.tmp/cf/fe3ed9aee01445de8ff6e33fee5ab8c7 2024-11-21T04:22:58,721 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741902_1078 (size=8324) 2024-11-21T04:22:58,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741902_1078 (size=8324) 2024-11-21T04:22:58,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741902_1078 (size=8324) 2024-11-21T04:22:58,724 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/.tmp/cf/0df5ac04556a4b9481d45ee9b32c7bbc 2024-11-21T04:22:58,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/.tmp/cf/fe3ed9aee01445de8ff6e33fee5ab8c7 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/cf/fe3ed9aee01445de8ff6e33fee5ab8c7 2024-11-21T04:22:58,737 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/cf/fe3ed9aee01445de8ff6e33fee5ab8c7, entries=3, sequenceid=6, filesize=5.2 K 2024-11-21T04:22:58,745 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 72a2e6b9ecc445dc6f2caad25d1a0335 in 74ms, sequenceid=6, compaction requested=false 2024-11-21T04:22:58,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-21T04:22:58,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2603): Flush status journal for 72a2e6b9ecc445dc6f2caad25d1a0335: 2024-11-21T04:22:58,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. for snaptb0-testExportWithResetTtl completed. 2024-11-21T04:22:58,747 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-21T04:22:58,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:22:58,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/cf/fe3ed9aee01445de8ff6e33fee5ab8c7] hfiles 2024-11-21T04:22:58,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/cf/fe3ed9aee01445de8ff6e33fee5ab8c7 for snapshot=snaptb0-testExportWithResetTtl 2024-11-21T04:22:58,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/.tmp/cf/0df5ac04556a4b9481d45ee9b32c7bbc as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/cf/0df5ac04556a4b9481d45ee9b32c7bbc 2024-11-21T04:22:58,769 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/cf/0df5ac04556a4b9481d45ee9b32c7bbc, entries=47, sequenceid=6, filesize=8.1 K 2024-11-21T04:22:58,775 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 6e16a083394339512ddf4220646112fd in 105ms, sequenceid=6, compaction requested=false 2024-11-21T04:22:58,775 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2603): Flush status journal for 6e16a083394339512ddf4220646112fd: 2024-11-21T04:22:58,775 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. for snaptb0-testExportWithResetTtl completed. 2024-11-21T04:22:58,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-21T04:22:58,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:22:58,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/cf/0df5ac04556a4b9481d45ee9b32c7bbc] hfiles 2024-11-21T04:22:58,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/cf/0df5ac04556a4b9481d45ee9b32c7bbc for snapshot=snaptb0-testExportWithResetTtl 2024-11-21T04:22:58,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741903_1079 (size=107) 2024-11-21T04:22:58,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741903_1079 (size=107) 2024-11-21T04:22:58,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741903_1079 (size=107) 2024-11-21T04:22:58,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-21T04:22:58,804 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 
2024-11-21T04:22:58,804 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=34 2024-11-21T04:22:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=34 2024-11-21T04:22:58,804 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:58,805 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:22:58,808 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335 in 293 msec 2024-11-21T04:22:58,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741904_1080 (size=107) 2024-11-21T04:22:58,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741904_1080 (size=107) 2024-11-21T04:22:58,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741904_1080 (size=107) 2024-11-21T04:22:58,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. 
2024-11-21T04:22:58,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-21T04:22:58,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=35 2024-11-21T04:22:58,820 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:58,821 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6e16a083394339512ddf4220646112fd 2024-11-21T04:22:58,830 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-11-21T04:22:58,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6e16a083394339512ddf4220646112fd in 311 msec 2024-11-21T04:22:58,831 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:22:58,833 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:22:58,836 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:22:58,836 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-21T04:22:58,837 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-21T04:22:58,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741905_1081 (size=621) 2024-11-21T04:22:58,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741905_1081 (size=621) 2024-11-21T04:22:58,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741905_1081 (size=621) 2024-11-21T04:22:58,891 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:22:58,906 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): 
pid=33, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:22:58,907 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-21T04:22:58,910 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:22:58,910 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-21T04:22:58,914 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 429 msec 2024-11-21T04:22:59,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-21T04:22:59,113 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-21T04:22:59,115 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:22:59,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-21T04:22:59,118 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:22:59,119 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:22:59,119 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 36 2024-11-21T04:22:59,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=36 2024-11-21T04:22:59,120 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:22:59,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741906_1082 (size=397) 2024-11-21T04:22:59,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741906_1082 (size=397) 2024-11-21T04:22:59,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741906_1082 (size=397) 2024-11-21T04:22:59,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-21T04:22:59,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-21T04:22:59,549 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => dc166c3d9abc42b74887fefc13070e5a, NAME => 'testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:22:59,549 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 408b2e11edc7db1d3ac9edee08307a5e, NAME => 'testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:22:59,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741908_1084 (size=58) 2024-11-21T04:22:59,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741908_1084 (size=58) 2024-11-21T04:22:59,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741907_1083 (size=58) 2024-11-21T04:22:59,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741908_1084 (size=58) 2024-11-21T04:22:59,570 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741907_1083 (size=58) 2024-11-21T04:22:59,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741907_1083 (size=58) 2024-11-21T04:22:59,570 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:59,570 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 408b2e11edc7db1d3ac9edee08307a5e, disabling compactions & flushes 2024-11-21T04:22:59,570 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:22:59,570 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:22:59,570 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. after waiting 0 ms 2024-11-21T04:22:59,570 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:22:59,570 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:22:59,571 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 408b2e11edc7db1d3ac9edee08307a5e: Waiting for close lock at 1732162979570Disabling compacts and flushes for region at 1732162979570Disabling writes for close at 1732162979570Writing region close event to WAL at 1732162979570Closed at 1732162979570 2024-11-21T04:22:59,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-21T04:22:59,972 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:22:59,972 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing dc166c3d9abc42b74887fefc13070e5a, disabling compactions & flushes 2024-11-21T04:22:59,972 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 2024-11-21T04:22:59,972 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 2024-11-21T04:22:59,972 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 
after waiting 0 ms 2024-11-21T04:22:59,972 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 2024-11-21T04:22:59,972 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 2024-11-21T04:22:59,972 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for dc166c3d9abc42b74887fefc13070e5a: Waiting for close lock at 1732162979972Disabling compacts and flushes for region at 1732162979972Disabling writes for close at 1732162979972Writing region close event to WAL at 1732162979972Closed at 1732162979972 2024-11-21T04:22:59,974 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:22:59,974 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732162979974"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732162979974"}]},"ts":"1732162979974"} 2024-11-21T04:22:59,974 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732162979974"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732162979974"}]},"ts":"1732162979974"} 2024-11-21T04:22:59,978 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-21T04:22:59,979 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:22:59,979 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162979979"}]},"ts":"1732162979979"} 2024-11-21T04:22:59,981 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-21T04:22:59,982 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:22:59,983 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:22:59,983 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:22:59,983 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:22:59,983 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:22:59,983 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:22:59,983 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:22:59,983 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:22:59,983 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:22:59,983 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:22:59,983 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:22:59,984 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=dc166c3d9abc42b74887fefc13070e5a, ASSIGN}, {pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=408b2e11edc7db1d3ac9edee08307a5e, ASSIGN}] 2024-11-21T04:22:59,985 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=dc166c3d9abc42b74887fefc13070e5a, ASSIGN 2024-11-21T04:22:59,985 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=408b2e11edc7db1d3ac9edee08307a5e, ASSIGN 2024-11-21T04:22:59,986 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=dc166c3d9abc42b74887fefc13070e5a, ASSIGN; state=OFFLINE, location=fdc0a393186c,34363,1732162938185; forceNewPlan=false, retain=false 2024-11-21T04:22:59,986 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=408b2e11edc7db1d3ac9edee08307a5e, ASSIGN; state=OFFLINE, location=fdc0a393186c,42393,1732162938268; forceNewPlan=false, retain=false 2024-11-21T04:23:00,137 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-21T04:23:00,137 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=dc166c3d9abc42b74887fefc13070e5a, regionState=OPENING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:23:00,137 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=408b2e11edc7db1d3ac9edee08307a5e, regionState=OPENING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:23:00,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=dc166c3d9abc42b74887fefc13070e5a, ASSIGN because future has completed 2024-11-21T04:23:00,140 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc166c3d9abc42b74887fefc13070e5a, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:23:00,140 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=408b2e11edc7db1d3ac9edee08307a5e, ASSIGN because future has completed 2024-11-21T04:23:00,141 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure 408b2e11edc7db1d3ac9edee08307a5e, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:23:00,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-21T04:23:00,296 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 2024-11-21T04:23:00,296 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:23:00,296 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7752): Opening region: {ENCODED => 408b2e11edc7db1d3ac9edee08307a5e, NAME => 'testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T04:23:00,296 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7752): Opening region: {ENCODED => dc166c3d9abc42b74887fefc13070e5a, NAME => 'testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T04:23:00,297 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 
service=AccessControlService 2024-11-21T04:23:00,297 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. service=AccessControlService 2024-11-21T04:23:00,297 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:23:00,297 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:23:00,297 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:00,297 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:00,297 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:23:00,297 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:23:00,297 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7794): checking encryption for 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:00,297 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7797): checking classloading for 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:00,297 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7794): checking encryption for dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:00,297 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7797): checking classloading for dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:00,299 INFO [StoreOpener-408b2e11edc7db1d3ac9edee08307a5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:00,299 INFO [StoreOpener-dc166c3d9abc42b74887fefc13070e5a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 
dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:00,300 INFO [StoreOpener-dc166c3d9abc42b74887fefc13070e5a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dc166c3d9abc42b74887fefc13070e5a columnFamilyName cf 2024-11-21T04:23:00,300 INFO [StoreOpener-408b2e11edc7db1d3ac9edee08307a5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 408b2e11edc7db1d3ac9edee08307a5e columnFamilyName cf 2024-11-21T04:23:00,301 DEBUG [StoreOpener-dc166c3d9abc42b74887fefc13070e5a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:23:00,301 DEBUG [StoreOpener-408b2e11edc7db1d3ac9edee08307a5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:23:00,301 INFO [StoreOpener-408b2e11edc7db1d3ac9edee08307a5e-1 {}] regionserver.HStore(327): Store=408b2e11edc7db1d3ac9edee08307a5e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:23:00,301 INFO [StoreOpener-dc166c3d9abc42b74887fefc13070e5a-1 {}] regionserver.HStore(327): Store=dc166c3d9abc42b74887fefc13070e5a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:23:00,302 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1038): replaying wal for dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:00,302 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1038): replaying wal for 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:00,303 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:00,303 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 
0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:00,303 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:00,303 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:00,304 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1048): stopping wal replay for dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:00,304 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1048): stopping wal replay for 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:00,304 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1060): Cleaning up temporary data for dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:00,304 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1060): Cleaning up temporary data for 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:00,305 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1093): writing seq id for dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:00,305 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1093): writing seq id for 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:00,308 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:23:00,308 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:23:00,308 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1114): Opened dc166c3d9abc42b74887fefc13070e5a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63903843, jitterRate=-0.04775853455066681}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:23:00,308 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1114): Opened 408b2e11edc7db1d3ac9edee08307a5e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74736882, 
jitterRate=0.11366632580757141}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:23:00,308 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:00,308 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:00,309 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1006): Region open journal for 408b2e11edc7db1d3ac9edee08307a5e: Running coprocessor pre-open hook at 1732162980297Writing region info on filesystem at 1732162980297Initializing all the Stores at 1732162980298 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162980298Cleaning up temporary data from old regions at 1732162980304 (+6 ms)Running coprocessor post-open hooks at 1732162980308 (+4 ms)Region opened successfully at 1732162980309 (+1 ms) 2024-11-21T04:23:00,309 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1006): Region open journal for dc166c3d9abc42b74887fefc13070e5a: Running coprocessor pre-open hook at 1732162980297Writing region info on filesystem at 1732162980298 (+1 ms)Initializing all the Stores at 1732162980298Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162980298Cleaning up temporary data from old regions at 1732162980304 (+6 ms)Running coprocessor post-open hooks at 1732162980308 (+4 ms)Region opened successfully at 1732162980309 (+1 ms) 2024-11-21T04:23:00,310 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a., pid=39, masterSystemTime=1732162980292 2024-11-21T04:23:00,316 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e., pid=40, masterSystemTime=1732162980292 2024-11-21T04:23:00,327 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 2024-11-21T04:23:00,328 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 
2024-11-21T04:23:00,329 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=dc166c3d9abc42b74887fefc13070e5a, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:23:00,330 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:23:00,330 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:23:00,331 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=408b2e11edc7db1d3ac9edee08307a5e, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:23:00,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc166c3d9abc42b74887fefc13070e5a, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:23:00,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure 408b2e11edc7db1d3ac9edee08307a5e, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:23:00,344 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=37 2024-11-21T04:23:00,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=37, state=SUCCESS, hasLock=false; OpenRegionProcedure dc166c3d9abc42b74887fefc13070e5a, server=fdc0a393186c,34363,1732162938185 in 200 msec 2024-11-21T04:23:00,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=dc166c3d9abc42b74887fefc13070e5a, ASSIGN in 362 msec 2024-11-21T04:23:00,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=38 2024-11-21T04:23:00,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=38, state=SUCCESS, hasLock=false; OpenRegionProcedure 408b2e11edc7db1d3ac9edee08307a5e, server=fdc0a393186c,42393,1732162938268 in 202 msec 2024-11-21T04:23:00,350 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=38, resume processing ppid=36 2024-11-21T04:23:00,350 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=408b2e11edc7db1d3ac9edee08307a5e, ASSIGN in 364 msec 2024-11-21T04:23:00,351 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:23:00,351 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162980351"}]},"ts":"1732162980351"} 2024-11-21T04:23:00,353 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-21T04:23:00,357 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:23:00,357 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-21T04:23:00,363 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-21T04:23:00,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:00,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:00,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:00,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:00,431 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:00,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:00,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:00,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:00,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:00,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:00,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data 
PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:00,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:00,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 1.3170 sec 2024-11-21T04:23:00,814 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0001_000001 (auth:SIMPLE) from 127.0.0.1:53306 2024-11-21T04:23:00,822 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_2/usercache/jenkins/appcache/application_1732162945431_0001/container_1732162945431_0001_01_000001/launch_container.sh] 2024-11-21T04:23:00,822 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_2/usercache/jenkins/appcache/application_1732162945431_0001/container_1732162945431_0001_01_000001/container_tokens] 2024-11-21T04:23:00,822 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_2/usercache/jenkins/appcache/application_1732162945431_0001/container_1732162945431_0001_01_000001/sysfs] 2024-11-21T04:23:01,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-21T04:23:01,273 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-21T04:23:01,273 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-21T04:23:01,273 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:23:01,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-21T04:23:01,278 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:23:01,278 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 
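(The table creation that completes above — CreateTableProcedure pid=36, two regions with start keys "" and "1", state ENABLED in hbase:meta — corresponds to an Admin-side call roughly like the sketch below. Table, family and split key are taken from the log; the ACL grant to the jenkins user is omitted.)

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            TableName tn = TableName.valueOf("testExportWithResetTtl");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // A single explicit split point "1" yields the two regions seen above
                // (start keys "" and "1").
                byte[][] splitKeys = { Bytes.toBytes("1") };
                admin.createTable(
                    TableDescriptorBuilder.newBuilder(tn)
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                        .build(),
                    splitKeys);
                // createTable() returns once the CreateTableProcedure finishes;
                // isTableAvailable() is an extra check that every region is assigned.
                if (!admin.isTableAvailable(tn)) {
                    throw new IllegalStateException("Regions of " + tn + " not yet assigned");
                }
            }
        }
    }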
2024-11-21T04:23:01,278 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T04:23:01,288 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='01917d695d508361a3c7f94c12f27a51d', locateType=CURRENT is [region=testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:23:01,290 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='18287572e70cf49d053b55fe4020628a5', locateType=CURRENT is [region=testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:23:01,292 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='397689e1c19ed438ef1813c4f85d82170', locateType=CURRENT is [region=testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:23:01,294 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='2603805e343e775bf95fb323b66e175a1', locateType=CURRENT is [region=testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:23:01,296 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='5f7c160c67da40b1041b26cb12a5d4c6f', locateType=CURRENT is [region=testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:23:01,297 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='440088cea503ec8a607910ac2915fe07a', locateType=CURRENT is [region=testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:23:01,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:23:01,302 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42393 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:23:01,304 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T04:23:01,307 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-21T04:23:01,307 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 
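(The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are emitted when mutations arrive with durability SKIP_WAL. A minimal client-side sketch that produces such a write; the row key, qualifier and value here are hypothetical.)

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class SkipWalPutSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
                Put put = new Put(Bytes.toBytes("row-0001"))   // hypothetical row key
                    .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
                // Skipping the WAL keeps the edit in the memstore only until a flush,
                // which is why the region server logs the data-loss warning.
                put.setDurability(Durability.SKIP_WAL);
                table.put(put);
            }
        }
    }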
2024-11-21T04:23:01,308 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:23:01,310 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T04:23:01,317 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T04:23:01,325 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-21T04:23:01,329 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-21T04:23:01,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732162981329 (current time:1732162981329). 2024-11-21T04:23:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-21T04:23:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:23:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bebc98d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:23:01,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:23:01,331 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:23:01,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:23:01,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:23:01,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3baa1828, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:01,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:23:01,332 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:23:01,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:01,333 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46936, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:23:01,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3426efa0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:23:01,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:23:01,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:01,336 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41634, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:01,337 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
2024-11-21T04:23:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-21T04:23:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:01,338 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:23:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cf332a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:23:01,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:23:01,339 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:23:01,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:23:01,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:23:01,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f25fecf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:01,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:23:01,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:23:01,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:01,341 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46952, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:23:01,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28288db8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:01,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:23:01,343 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:23:01,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:01,345 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41640, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:01,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:23:01,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:01,349 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43670, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:01,350 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
2024-11-21T04:23:01,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-21T04:23:01,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:01,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:01,351 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:23:01,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-21T04:23:01,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
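(The request logged above, { ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, corresponds to an Admin-side snapshot call along the following lines. Sketch only: the ttl=100000 is supplied as a snapshot property in releases that support snapshot TTLs and is omitted here.)

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public final class SnapshotSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // A FLUSH-type snapshot first flushes every region's memstore (the
                // "Flushing ..." entries that follow), then references the resulting HFiles.
                admin.snapshot("snaptb-testExportWithResetTtl",
                               TableName.valueOf("testExportWithResetTtl"),
                               SnapshotType.FLUSH);
            }
        }
    }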
2024-11-21T04:23:01,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-21T04:23:01,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-21T04:23:01,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-21T04:23:01,354 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:23:01,355 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:23:01,358 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:23:01,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741909_1085 (size=143) 2024-11-21T04:23:01,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741909_1085 (size=143) 2024-11-21T04:23:01,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741909_1085 (size=143) 2024-11-21T04:23:01,378 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:23:01,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc166c3d9abc42b74887fefc13070e5a}, {pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 408b2e11edc7db1d3ac9edee08307a5e}] 2024-11-21T04:23:01,381 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:01,381 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:01,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=41 2024-11-21T04:23:01,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=42 2024-11-21T04:23:01,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=43 2024-11-21T04:23:01,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:23:01,534 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2902): Flushing 408b2e11edc7db1d3ac9edee08307a5e 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-21T04:23:01,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 2024-11-21T04:23:01,535 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2902): Flushing dc166c3d9abc42b74887fefc13070e5a 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-21T04:23:01,561 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/.tmp/cf/6c00235ab5c4431490086ce0a760fc5c is 71, key is 102708ec170d8212571ed85ecbda70aa/cf:q/1732162981301/Put/seqid=0 2024-11-21T04:23:01,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/.tmp/cf/fe14c64adabb46c68a88c7c5dd9e5547 is 71, key is 010fb1b8fcce46d82ea37b7d371b2fe6/cf:q/1732162981298/Put/seqid=0 2024-11-21T04:23:01,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741910_1086 (size=8324) 2024-11-21T04:23:01,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741910_1086 (size=8324) 2024-11-21T04:23:01,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741910_1086 (size=8324) 2024-11-21T04:23:01,587 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/.tmp/cf/6c00235ab5c4431490086ce0a760fc5c 2024-11-21T04:23:01,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/.tmp/cf/6c00235ab5c4431490086ce0a760fc5c as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/cf/6c00235ab5c4431490086ce0a760fc5c 2024-11-21T04:23:01,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741911_1087 (size=5286) 2024-11-21T04:23:01,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741911_1087 (size=5286) 2024-11-21T04:23:01,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741911_1087 (size=5286) 2024-11-21T04:23:01,598 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/.tmp/cf/fe14c64adabb46c68a88c7c5dd9e5547 2024-11-21T04:23:01,607 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/cf/6c00235ab5c4431490086ce0a760fc5c, entries=47, sequenceid=5, filesize=8.1 K 2024-11-21T04:23:01,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/.tmp/cf/fe14c64adabb46c68a88c7c5dd9e5547 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/cf/fe14c64adabb46c68a88c7c5dd9e5547 2024-11-21T04:23:01,609 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 408b2e11edc7db1d3ac9edee08307a5e in 74ms, sequenceid=5, compaction requested=false 2024-11-21T04:23:01,609 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-21T04:23:01,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2603): Flush status journal for 408b2e11edc7db1d3ac9edee08307a5e: 2024-11-21T04:23:01,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. for snaptb-testExportWithResetTtl completed. 
2024-11-21T04:23:01,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-21T04:23:01,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:23:01,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/cf/6c00235ab5c4431490086ce0a760fc5c] hfiles 2024-11-21T04:23:01,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/cf/6c00235ab5c4431490086ce0a760fc5c for snapshot=snaptb-testExportWithResetTtl 2024-11-21T04:23:01,617 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/cf/fe14c64adabb46c68a88c7c5dd9e5547, entries=3, sequenceid=5, filesize=5.2 K 2024-11-21T04:23:01,618 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for dc166c3d9abc42b74887fefc13070e5a in 83ms, sequenceid=5, compaction requested=false 2024-11-21T04:23:01,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2603): Flush status journal for dc166c3d9abc42b74887fefc13070e5a: 2024-11-21T04:23:01,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. for snaptb-testExportWithResetTtl completed. 2024-11-21T04:23:01,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-21T04:23:01,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:23:01,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/cf/fe14c64adabb46c68a88c7c5dd9e5547] hfiles 2024-11-21T04:23:01,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/cf/fe14c64adabb46c68a88c7c5dd9e5547 for snapshot=snaptb-testExportWithResetTtl 2024-11-21T04:23:01,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741912_1088 (size=100) 2024-11-21T04:23:01,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741912_1088 (size=100) 2024-11-21T04:23:01,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741912_1088 (size=100) 2024-11-21T04:23:01,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 
2024-11-21T04:23:01,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=43 2024-11-21T04:23:01,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=43 2024-11-21T04:23:01,637 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:01,638 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:01,641 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 408b2e11edc7db1d3ac9edee08307a5e in 260 msec 2024-11-21T04:23:01,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741913_1089 (size=100) 2024-11-21T04:23:01,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741913_1089 (size=100) 2024-11-21T04:23:01,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741913_1089 (size=100) 2024-11-21T04:23:01,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 
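(Once the per-region SnapshotRegionProcedures above and the parent SnapshotProcedure pid=41 a few entries below have finished, the completed snapshot is visible through the Admin API; a minimal sketch to confirm it:)

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public final class ListSnapshotsSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Completed snapshots live under /.hbase-snapshot and are returned here.
                for (SnapshotDescription sd : admin.listSnapshots()) {
                    System.out.println(sd.getName() + " -> " + sd.getTableName());
                }
            }
        }
    }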
2024-11-21T04:23:01,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-11-21T04:23:01,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=42 2024-11-21T04:23:01,654 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:01,655 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:01,659 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-11-21T04:23:01,659 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:23:01,660 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure dc166c3d9abc42b74887fefc13070e5a in 277 msec 2024-11-21T04:23:01,661 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:23:01,661 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:23:01,662 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-21T04:23:01,662 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-21T04:23:01,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-21T04:23:01,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741914_1090 (size=600) 2024-11-21T04:23:01,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741914_1090 (size=600) 2024-11-21T04:23:01,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741914_1090 (size=600) 2024-11-21T04:23:01,697 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl 
table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:23:01,706 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:23:01,706 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-21T04:23:01,708 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:23:01,709 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-21T04:23:01,711 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 357 msec 2024-11-21T04:23:01,802 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:23:01,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-21T04:23:01,983 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-21T04:23:01,995 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994 2024-11-21T04:23:01,995 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44067, tgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994, rawTgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994, srcFsUri=hdfs://localhost:44067, srcDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:02,025 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44067, inputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:02,025 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-21T04:23:02,027 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T04:23:02,034 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-21T04:23:02,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741916_1092 (size=600) 2024-11-21T04:23:02,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741916_1092 (size=600) 2024-11-21T04:23:02,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741916_1092 (size=600) 2024-11-21T04:23:02,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741915_1091 (size=143) 2024-11-21T04:23:02,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741915_1091 (size=143) 2024-11-21T04:23:02,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741915_1091 (size=143) 2024-11-21T04:23:02,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741917_1093 (size=141) 2024-11-21T04:23:02,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741917_1093 (size=141) 2024-11-21T04:23:02,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741917_1093 (size=141) 2024-11-21T04:23:02,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:02,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:02,113 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:02,663 INFO [master/fdc0a393186c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-21T04:23:02,663 INFO [master/fdc0a393186c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
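(The export that starts above is driven by the ExportSnapshot tool: verify the source snapshot, copy its manifest, then ship the referenced HFiles via a MapReduce job, which is why TableMapReduceUtil resolves all the dependency jars in the entries that follow. Outside the test harness the same operation can be launched roughly as below; the snapshot name and destination path are the ones printed in the log, and -snapshot/-copy-to are the tool's standard options.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public final class ExportSnapshotSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Equivalent to:
            //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
            //       -snapshot snaptb-testExportWithResetTtl \
            //       -copy-to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "-snapshot", "snaptb-testExportWithResetTtl",
                "-copy-to",
                "hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994"
            });
            System.exit(rc);
        }
    }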
2024-11-21T04:23:03,193 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-15574849789719630443.jar 2024-11-21T04:23:03,194 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:03,194 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:03,282 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-10821237762172221987.jar 2024-11-21T04:23:03,282 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:03,283 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:03,283 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:03,284 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:03,284 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:03,284 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:03,285 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T04:23:03,285 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T04:23:03,285 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T04:23:03,286 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T04:23:03,286 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T04:23:03,286 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T04:23:03,287 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T04:23:03,287 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T04:23:03,287 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T04:23:03,288 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T04:23:03,288 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T04:23:03,289 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:03,289 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:03,289 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:03,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:03,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:03,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:03,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:03,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741918_1094 (size=131440) 2024-11-21T04:23:03,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741918_1094 (size=131440) 2024-11-21T04:23:03,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741918_1094 (size=131440) 2024-11-21T04:23:03,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741919_1095 (size=4188619) 2024-11-21T04:23:03,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741919_1095 (size=4188619) 2024-11-21T04:23:03,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741919_1095 (size=4188619) 2024-11-21T04:23:03,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741920_1096 (size=1323991) 2024-11-21T04:23:03,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741920_1096 (size=1323991) 2024-11-21T04:23:03,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741920_1096 (size=1323991) 2024-11-21T04:23:03,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741921_1097 (size=903734) 2024-11-21T04:23:03,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741921_1097 (size=903734) 2024-11-21T04:23:03,420 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741921_1097 (size=903734) 2024-11-21T04:23:03,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741922_1098 (size=8360083) 2024-11-21T04:23:03,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741922_1098 (size=8360083) 2024-11-21T04:23:03,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741922_1098 (size=8360083) 2024-11-21T04:23:03,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741923_1099 (size=1877034) 2024-11-21T04:23:03,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741923_1099 (size=1877034) 2024-11-21T04:23:03,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741923_1099 (size=1877034) 2024-11-21T04:23:03,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741924_1100 (size=77835) 2024-11-21T04:23:03,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741924_1100 (size=77835) 2024-11-21T04:23:03,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741924_1100 (size=77835) 2024-11-21T04:23:03,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741925_1101 (size=30949) 2024-11-21T04:23:03,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741925_1101 (size=30949) 2024-11-21T04:23:03,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741925_1101 (size=30949) 2024-11-21T04:23:03,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741926_1102 (size=1597327) 2024-11-21T04:23:03,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741926_1102 (size=1597327) 2024-11-21T04:23:03,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741926_1102 (size=1597327) 2024-11-21T04:23:03,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741927_1103 (size=4695811) 2024-11-21T04:23:03,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741927_1103 (size=4695811) 2024-11-21T04:23:03,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741927_1103 (size=4695811) 2024-11-21T04:23:03,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741928_1104 (size=232957) 2024-11-21T04:23:03,540 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741928_1104 (size=232957) 2024-11-21T04:23:03,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741928_1104 (size=232957) 2024-11-21T04:23:03,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741929_1105 (size=127628) 2024-11-21T04:23:03,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741929_1105 (size=127628) 2024-11-21T04:23:03,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741929_1105 (size=127628) 2024-11-21T04:23:03,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741930_1106 (size=20406) 2024-11-21T04:23:03,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741930_1106 (size=20406) 2024-11-21T04:23:03,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741930_1106 (size=20406) 2024-11-21T04:23:03,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741931_1107 (size=6424740) 2024-11-21T04:23:03,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741931_1107 (size=6424740) 2024-11-21T04:23:03,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741931_1107 (size=6424740) 2024-11-21T04:23:03,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741932_1108 (size=5175431) 2024-11-21T04:23:03,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741932_1108 (size=5175431) 2024-11-21T04:23:03,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741932_1108 (size=5175431) 2024-11-21T04:23:03,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741933_1109 (size=217634) 2024-11-21T04:23:03,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741933_1109 (size=217634) 2024-11-21T04:23:03,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741933_1109 (size=217634) 2024-11-21T04:23:03,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741934_1110 (size=1832290) 2024-11-21T04:23:03,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741934_1110 (size=1832290) 2024-11-21T04:23:03,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741934_1110 (size=1832290) 2024-11-21T04:23:03,711 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741935_1111 (size=322274) 2024-11-21T04:23:03,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741935_1111 (size=322274) 2024-11-21T04:23:03,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741935_1111 (size=322274) 2024-11-21T04:23:03,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741936_1112 (size=503880) 2024-11-21T04:23:03,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741936_1112 (size=503880) 2024-11-21T04:23:03,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741936_1112 (size=503880) 2024-11-21T04:23:03,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741937_1113 (size=29229) 2024-11-21T04:23:03,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741937_1113 (size=29229) 2024-11-21T04:23:03,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741937_1113 (size=29229) 2024-11-21T04:23:03,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741938_1114 (size=24096) 2024-11-21T04:23:03,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741938_1114 (size=24096) 2024-11-21T04:23:03,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741938_1114 (size=24096) 2024-11-21T04:23:03,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741939_1115 (size=440656) 2024-11-21T04:23:03,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741939_1115 (size=440656) 2024-11-21T04:23:03,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741939_1115 (size=440656) 2024-11-21T04:23:03,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741940_1116 (size=111872) 2024-11-21T04:23:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741940_1116 (size=111872) 2024-11-21T04:23:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741940_1116 (size=111872) 2024-11-21T04:23:03,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741941_1117 (size=45609) 2024-11-21T04:23:03,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741941_1117 (size=45609) 2024-11-21T04:23:03,817 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741941_1117 (size=45609) 2024-11-21T04:23:03,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741942_1118 (size=136454) 2024-11-21T04:23:03,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741942_1118 (size=136454) 2024-11-21T04:23:03,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741942_1118 (size=136454) 2024-11-21T04:23:03,832 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-21T04:23:03,835 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-21T04:23:03,837 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T04:23:03,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741943_1119 (size=324) 2024-11-21T04:23:03,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741943_1119 (size=324) 2024-11-21T04:23:03,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741943_1119 (size=324) 2024-11-21T04:23:03,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741944_1120 (size=15) 2024-11-21T04:23:03,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741944_1120 (size=15) 2024-11-21T04:23:03,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741944_1120 (size=15) 2024-11-21T04:23:03,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741945_1121 (size=303730) 2024-11-21T04:23:03,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741945_1121 (size=303730) 2024-11-21T04:23:03,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741945_1121 (size=303730) 2024-11-21T04:23:03,919 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T04:23:03,919 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T04:23:04,545 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0002_000001 (auth:SIMPLE) from 127.0.0.1:56362 2024-11-21T04:23:07,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-21T04:23:07,560 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-21T04:23:10,936 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0002_000001 (auth:SIMPLE) from 127.0.0.1:45712 2024-11-21T04:23:11,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741946_1122 (size=349380) 2024-11-21T04:23:11,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741946_1122 (size=349380) 2024-11-21T04:23:11,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741946_1122 (size=349380) 2024-11-21T04:23:13,184 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0002_000001 (auth:SIMPLE) from 127.0.0.1:39802 2024-11-21T04:23:16,349 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
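The `mapreduce.TableMapReduceUtil(972)` DEBUG entries earlier in this run record the dependency-jar resolution ExportSnapshot performs before submitting its MapReduce copy job; an export like this is normally launched along the lines of `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot snaptb-testExportWithResetTtl -copy-to <destination path>`. As a minimal illustrative sketch of the API behind those "For class X, using jar Y" lines — not the test's own code; the class and job names below are invented — a job can stage the same jars via `TableMapReduceUtil.addDependencyJars`:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarStagingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-sketch"); // hypothetical job name

    // Resolves the jar that provides each class the job needs (HBase modules,
    // shaded thirdparty jars, hadoop-common, opentelemetry, ...) and adds it to
    // the job's distributed cache. The same resolution path produces the
    // "For class <name>, using jar <path>" DEBUG lines seen above.
    TableMapReduceUtil.addDependencyJars(job);

    // The tmpjars property now lists everything that will be shipped with the job.
    System.out.println(job.getConfiguration().get("tmpjars"));
  }
}
```

The BlockManager `addStoredBlock` INFO lines that follow are simply HDFS reporting three replicas for each of those staged jars and job files.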
2024-11-21T04:23:16,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741947_1123 (size=8324) 2024-11-21T04:23:16,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741947_1123 (size=8324) 2024-11-21T04:23:16,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741947_1123 (size=8324) 2024-11-21T04:23:16,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741948_1124 (size=5286) 2024-11-21T04:23:16,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741948_1124 (size=5286) 2024-11-21T04:23:16,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741948_1124 (size=5286) 2024-11-21T04:23:16,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741949_1125 (size=17398) 2024-11-21T04:23:16,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741949_1125 (size=17398) 2024-11-21T04:23:16,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741949_1125 (size=17398) 2024-11-21T04:23:16,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741950_1126 (size=461) 2024-11-21T04:23:16,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741950_1126 (size=461) 2024-11-21T04:23:16,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741950_1126 (size=461) 2024-11-21T04:23:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741951_1127 (size=17398) 2024-11-21T04:23:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741951_1127 (size=17398) 2024-11-21T04:23:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741951_1127 (size=17398) 2024-11-21T04:23:17,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741952_1128 (size=349380) 2024-11-21T04:23:17,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741952_1128 (size=349380) 2024-11-21T04:23:17,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741952_1128 (size=349380) 2024-11-21T04:23:17,048 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0002_000001 (auth:SIMPLE) from 127.0.0.1:39808 2024-11-21T04:23:18,209 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T04:23:18,210 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): 
Verify the exported snapshot's expiration status and integrity. 2024-11-21T04:23:18,224 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb-testExportWithResetTtl 2024-11-21T04:23:18,224 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T04:23:18,225 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T04:23:18,225 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-21T04:23:18,226 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-21T04:23:18,226 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-21T04:23:18,226 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-21T04:23:18,227 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-21T04:23:18,227 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732162981994/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-21T04:23:18,249 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-11-21T04:23:18,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-21T04:23:18,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-21T04:23:18,258 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162998258"}]},"ts":"1732162998258"} 2024-11-21T04:23:18,265 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-21T04:23:18,265 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-21T04:23:18,267 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-21T04:23:18,270 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=dc166c3d9abc42b74887fefc13070e5a, UNASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=408b2e11edc7db1d3ac9edee08307a5e, UNASSIGN}] 2024-11-21T04:23:18,272 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=408b2e11edc7db1d3ac9edee08307a5e, UNASSIGN 2024-11-21T04:23:18,272 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=dc166c3d9abc42b74887fefc13070e5a, UNASSIGN 2024-11-21T04:23:18,273 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=408b2e11edc7db1d3ac9edee08307a5e, regionState=CLOSING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:23:18,274 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=dc166c3d9abc42b74887fefc13070e5a, regionState=CLOSING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:23:18,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=408b2e11edc7db1d3ac9edee08307a5e, UNASSIGN because future has completed 2024-11-21T04:23:18,278 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:23:18,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure 408b2e11edc7db1d3ac9edee08307a5e, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:23:18,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=dc166c3d9abc42b74887fefc13070e5a, UNASSIGN because future has completed 2024-11-21T04:23:18,279 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:23:18,279 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure dc166c3d9abc42b74887fefc13070e5a, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:23:18,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-21T04:23:18,432 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(122): 
Close 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:18,433 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:23:18,433 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1722): Closing 408b2e11edc7db1d3ac9edee08307a5e, disabling compactions & flushes 2024-11-21T04:23:18,433 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:23:18,433 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:23:18,433 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. after waiting 0 ms 2024-11-21T04:23:18,433 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:23:18,435 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(122): Close dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:18,435 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:23:18,435 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1722): Closing dc166c3d9abc42b74887fefc13070e5a, disabling compactions & flushes 2024-11-21T04:23:18,435 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 2024-11-21T04:23:18,435 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 2024-11-21T04:23:18,435 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. after waiting 0 ms 2024-11-21T04:23:18,435 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 
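The procedure entries around this point (pid=44 DisableTableProcedure, pids 45-49 unassigning and closing regions dc166c3d9abc42b74887fefc13070e5a and 408b2e11edc7db1d3ac9edee08307a5e, then the DeleteTableProcedure that follows) are the master-side effect of the test dropping the temporary `testExportWithResetTtl` table. A minimal client-side sketch of the calls that trigger this flow — illustrative only, using the standard Admin API rather than the test's own helpers:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        // disableTable() drives the DisableTableProcedure/CloseRegionProcedure
        // chain seen above: regions are unassigned, their hbase:meta state moves
        // to CLOSED, and the table is marked DISABLED.
        if (!admin.isTableDisabled(table)) {
          admin.disableTable(table);
        }
        // deleteTable() drives the DeleteTableProcedure: store files are archived
        // and the table's rows are removed from hbase:meta.
        admin.deleteTable(table);
      }
    }
  }
}
```

The procedure ids, region names, and millisecond timings in the surrounding entries are of course specific to this run.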
2024-11-21T04:23:18,441 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T04:23:18,442 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T04:23:18,442 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:23:18,442 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:23:18,442 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a. 2024-11-21T04:23:18,442 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e. 2024-11-21T04:23:18,442 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1676): Region close journal for dc166c3d9abc42b74887fefc13070e5a: Waiting for close lock at 1732162998435Running coprocessor pre-close hooks at 1732162998435Disabling compacts and flushes for region at 1732162998435Disabling writes for close at 1732162998435Writing region close event to WAL at 1732162998437 (+2 ms)Running coprocessor post-close hooks at 1732162998442 (+5 ms)Closed at 1732162998442 2024-11-21T04:23:18,443 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1676): Region close journal for 408b2e11edc7db1d3ac9edee08307a5e: Waiting for close lock at 1732162998433Running coprocessor pre-close hooks at 1732162998433Disabling compacts and flushes for region at 1732162998433Disabling writes for close at 1732162998433Writing region close event to WAL at 1732162998434 (+1 ms)Running coprocessor post-close hooks at 1732162998442 (+8 ms)Closed at 1732162998442 2024-11-21T04:23:18,445 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(157): Closed 408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:18,446 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=408b2e11edc7db1d3ac9edee08307a5e, regionState=CLOSED 2024-11-21T04:23:18,446 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(157): Closed dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:18,447 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=dc166c3d9abc42b74887fefc13070e5a, regionState=CLOSED 2024-11-21T04:23:18,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, 
ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure 408b2e11edc7db1d3ac9edee08307a5e, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:23:18,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure dc166c3d9abc42b74887fefc13070e5a, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:23:18,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-11-21T04:23:18,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; CloseRegionProcedure 408b2e11edc7db1d3ac9edee08307a5e, server=fdc0a393186c,42393,1732162938268 in 171 msec 2024-11-21T04:23:18,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-11-21T04:23:18,453 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=408b2e11edc7db1d3ac9edee08307a5e, UNASSIGN in 182 msec 2024-11-21T04:23:18,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; CloseRegionProcedure dc166c3d9abc42b74887fefc13070e5a, server=fdc0a393186c,34363,1732162938185 in 172 msec 2024-11-21T04:23:18,455 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45 2024-11-21T04:23:18,455 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=dc166c3d9abc42b74887fefc13070e5a, UNASSIGN in 184 msec 2024-11-21T04:23:18,458 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=45, resume processing ppid=44 2024-11-21T04:23:18,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, ppid=44, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 189 msec 2024-11-21T04:23:18,460 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162998460"}]},"ts":"1732162998460"} 2024-11-21T04:23:18,462 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-21T04:23:18,462 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-21T04:23:18,465 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 214 msec 2024-11-21T04:23:18,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-21T04:23:18,572 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-21T04:23:18,573 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-11-21T04:23:18,574 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-21T04:23:18,575 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-21T04:23:18,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-21T04:23:18,576 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=50, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-21T04:23:18,578 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-21T04:23:18,581 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:18,581 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:18,583 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/recovered.edits] 2024-11-21T04:23:18,583 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/recovered.edits] 2024-11-21T04:23:18,587 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/cf/fe14c64adabb46c68a88c7c5dd9e5547 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/cf/fe14c64adabb46c68a88c7c5dd9e5547 2024-11-21T04:23:18,587 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/cf/6c00235ab5c4431490086ce0a760fc5c to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/cf/6c00235ab5c4431490086ce0a760fc5c 2024-11-21T04:23:18,591 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/recovered.edits/8.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a/recovered.edits/8.seqid 2024-11-21T04:23:18,591 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/recovered.edits/8.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e/recovered.edits/8.seqid 2024-11-21T04:23:18,591 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/408b2e11edc7db1d3ac9edee08307a5e 2024-11-21T04:23:18,591 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportWithResetTtl/dc166c3d9abc42b74887fefc13070e5a 2024-11-21T04:23:18,591 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-21T04:23:18,593 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=50, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-21T04:23:18,596 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-21T04:23:18,598 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-21T04:23:18,599 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=50, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-21T04:23:18,599 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
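The HFileArchiver entries above show where the deleted table's data ends up: store files and recovered.edits copies are moved under `<hbase root>/archive/data/<namespace>/<table>/<region>/` rather than removed outright. A small sketch, reusing the NameNode URI and root directory from this log (in a real deployment these come from `hbase.rootdir` rather than being hard-coded), of listing that archive location with the plain Hadoop FileSystem API:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListArchivedStoreFilesSketch {
  public static void main(String[] args) throws Exception {
    // URI and path taken from the log entries above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44067"), new Configuration());
    Path archived = new Path(
        "/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testExportWithResetTtl");

    // Recursively list the archived store files and recovered.edits copies.
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(archived, true);
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
  }
}
```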
2024-11-21T04:23:18,600 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732162998600"}]},"ts":"9223372036854775807"} 2024-11-21T04:23:18,600 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732162998600"}]},"ts":"9223372036854775807"} 2024-11-21T04:23:18,602 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T04:23:18,602 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => dc166c3d9abc42b74887fefc13070e5a, NAME => 'testExportWithResetTtl,,1732162979114.dc166c3d9abc42b74887fefc13070e5a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 408b2e11edc7db1d3ac9edee08307a5e, NAME => 'testExportWithResetTtl,1,1732162979114.408b2e11edc7db1d3ac9edee08307a5e.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T04:23:18,602 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-11-21T04:23:18,602 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732162998602"}]},"ts":"9223372036854775807"} 2024-11-21T04:23:18,605 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-21T04:23:18,605 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=50, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-21T04:23:18,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 32 msec 2024-11-21T04:23:18,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T04:23:18,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T04:23:18,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T04:23:18,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T04:23:18,633 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-21T04:23:18,633 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-21T04:23:18,633 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-21T04:23:18,633 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-21T04:23:18,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T04:23:18,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T04:23:18,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T04:23:18,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-21T04:23:18,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:18,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:18,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:18,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:18,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-21T04:23:18,645 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-21T04:23:18,646 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-21T04:23:18,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:18,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:18,646 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-11-21T04:23:18,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:18,646 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:18,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=51, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T04:23:18,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-21T04:23:18,650 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162998650"}]},"ts":"1732162998650"} 2024-11-21T04:23:18,652 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-21T04:23:18,652 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-21T04:23:18,653 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-21T04:23:18,654 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=72a2e6b9ecc445dc6f2caad25d1a0335, UNASSIGN}, {pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6e16a083394339512ddf4220646112fd, UNASSIGN}] 2024-11-21T04:23:18,655 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6e16a083394339512ddf4220646112fd, UNASSIGN 2024-11-21T04:23:18,656 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=72a2e6b9ecc445dc6f2caad25d1a0335, UNASSIGN 2024-11-21T04:23:18,656 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=6e16a083394339512ddf4220646112fd, regionState=CLOSING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:23:18,656 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=72a2e6b9ecc445dc6f2caad25d1a0335, regionState=CLOSING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:23:18,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=72a2e6b9ecc445dc6f2caad25d1a0335, UNASSIGN because future has completed 2024-11-21T04:23:18,659 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:23:18,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=55, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:23:18,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6e16a083394339512ddf4220646112fd, UNASSIGN because future has completed 2024-11-21T04:23:18,660 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:23:18,660 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=56, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6e16a083394339512ddf4220646112fd, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:23:18,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-21T04:23:18,811 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(122): Close 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:23:18,811 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(122): Close 6e16a083394339512ddf4220646112fd 2024-11-21T04:23:18,812 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:23:18,812 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:23:18,812 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1722): Closing 72a2e6b9ecc445dc6f2caad25d1a0335, disabling compactions & flushes 2024-11-21T04:23:18,812 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1722): Closing 6e16a083394339512ddf4220646112fd, disabling compactions & flushes 2024-11-21T04:23:18,812 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 2024-11-21T04:23:18,812 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. 
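While the source table `testtb-testExportWithResetTtl` is being disabled above and below (pid=51 and its subprocedures), the snapshots themselves — `snaptb-testExportWithResetTtl` here — remain registered with the master until explicitly removed. The log section does not show that cleanup; the following is only a hedged sketch of how such a snapshot would be inspected and deleted through the Admin API, with nothing taken from the test beyond the snapshot name:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class SnapshotCleanupSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // List the snapshots currently known to the master.
      for (SnapshotDescription snapshot : admin.listSnapshots()) {
        System.out.println(snapshot.getName() + " table=" + snapshot.getTableName());
      }
      // Remove the export-source snapshot once it is no longer needed.
      admin.deleteSnapshot("snaptb-testExportWithResetTtl");
    }
  }
}
```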
2024-11-21T04:23:18,812 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 2024-11-21T04:23:18,812 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. 2024-11-21T04:23:18,812 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. after waiting 0 ms 2024-11-21T04:23:18,812 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. after waiting 0 ms 2024-11-21T04:23:18,812 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 2024-11-21T04:23:18,812 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. 2024-11-21T04:23:18,818 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:23:18,819 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:23:18,819 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:23:18,819 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd. 
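The records above and below trace a client-initiated table disable: the master stores DisableTableProcedure pid=51, fans out CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure children, and the client keeps asking "Checking to see if procedure is done pid=51" until RawAsyncHBaseAdmin logs "Operation: DISABLE ... completed" further down. For orientation only, a minimal Java sketch of the public Admin calls that produce this kind of sequence; this is not the test's own code, and the connection setup and class/method names here are illustrative:

    import java.util.concurrent.Future;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml for the target cluster is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithResetTtl");

          // The master stores a DisableTableProcedure and returns its pid; the
          // client then polls for completion, which is what the repeated
          // "Checking to see if procedure is done pid=51" lines correspond to.
          Future<Void> disable = admin.disableTableAsync(table);
          disable.get(); // returns once the procedure reaches SUCCESS

          // Blocking equivalent: admin.disableTable(table);
        }
      }
    }

The same pattern (store a procedure, poll it by pid) repeats for the DELETE and CREATE operations later in this log.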
2024-11-21T04:23:18,819 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1676): Region close journal for 6e16a083394339512ddf4220646112fd: Waiting for close lock at 1732162998812Running coprocessor pre-close hooks at 1732162998812Disabling compacts and flushes for region at 1732162998812Disabling writes for close at 1732162998812Writing region close event to WAL at 1732162998814 (+2 ms)Running coprocessor post-close hooks at 1732162998819 (+5 ms)Closed at 1732162998819 2024-11-21T04:23:18,819 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:23:18,820 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335. 2024-11-21T04:23:18,820 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1676): Region close journal for 72a2e6b9ecc445dc6f2caad25d1a0335: Waiting for close lock at 1732162998812Running coprocessor pre-close hooks at 1732162998812Disabling compacts and flushes for region at 1732162998812Disabling writes for close at 1732162998812Writing region close event to WAL at 1732162998813 (+1 ms)Running coprocessor post-close hooks at 1732162998819 (+6 ms)Closed at 1732162998820 (+1 ms) 2024-11-21T04:23:18,821 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(157): Closed 6e16a083394339512ddf4220646112fd 2024-11-21T04:23:18,822 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=6e16a083394339512ddf4220646112fd, regionState=CLOSED 2024-11-21T04:23:18,822 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(157): Closed 72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:23:18,824 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=72a2e6b9ecc445dc6f2caad25d1a0335, regionState=CLOSED 2024-11-21T04:23:18,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=56, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6e16a083394339512ddf4220646112fd, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:23:18,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=55, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:23:18,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=56, resume processing ppid=54 2024-11-21T04:23:18,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, ppid=54, state=SUCCESS, hasLock=false; CloseRegionProcedure 6e16a083394339512ddf4220646112fd, server=fdc0a393186c,34363,1732162938185 in 167 msec 2024-11-21T04:23:18,831 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-11-21T04:23:18,831 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; 
CloseRegionProcedure 72a2e6b9ecc445dc6f2caad25d1a0335, server=fdc0a393186c,39347,1732162938035 in 170 msec 2024-11-21T04:23:18,832 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=6e16a083394339512ddf4220646112fd, UNASSIGN in 176 msec 2024-11-21T04:23:18,833 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=53, resume processing ppid=52 2024-11-21T04:23:18,833 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=72a2e6b9ecc445dc6f2caad25d1a0335, UNASSIGN in 177 msec 2024-11-21T04:23:18,836 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=51 2024-11-21T04:23:18,836 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=51, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 182 msec 2024-11-21T04:23:18,838 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162998838"}]},"ts":"1732162998838"} 2024-11-21T04:23:18,840 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-21T04:23:18,840 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-21T04:23:18,843 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 195 msec 2024-11-21T04:23:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-21T04:23:18,963 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-21T04:23:18,964 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-11-21T04:23:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T04:23:18,966 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T04:23:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-21T04:23:18,968 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=57, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T04:23:18,970 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-21T04:23:18,973 
DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:23:18,973 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd 2024-11-21T04:23:18,975 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/recovered.edits] 2024-11-21T04:23:18,975 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/recovered.edits] 2024-11-21T04:23:18,979 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/cf/fe3ed9aee01445de8ff6e33fee5ab8c7 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/cf/fe3ed9aee01445de8ff6e33fee5ab8c7 2024-11-21T04:23:18,980 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/cf/0df5ac04556a4b9481d45ee9b32c7bbc to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/cf/0df5ac04556a4b9481d45ee9b32c7bbc 2024-11-21T04:23:18,983 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335/recovered.edits/9.seqid 2024-11-21T04:23:18,983 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd/recovered.edits/9.seqid 2024-11-21T04:23:18,984 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/72a2e6b9ecc445dc6f2caad25d1a0335 2024-11-21T04:23:18,984 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithResetTtl/6e16a083394339512ddf4220646112fd 2024-11-21T04:23:18,984 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-21T04:23:18,987 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=57, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T04:23:18,989 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-21T04:23:18,992 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-11-21T04:23:18,994 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=57, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T04:23:18,994 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-11-21T04:23:18,994 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732162998994"}]},"ts":"9223372036854775807"} 2024-11-21T04:23:18,994 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732162998994"}]},"ts":"9223372036854775807"} 2024-11-21T04:23:18,997 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T04:23:18,997 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 72a2e6b9ecc445dc6f2caad25d1a0335, NAME => 'testtb-testExportWithResetTtl,,1732162976560.72a2e6b9ecc445dc6f2caad25d1a0335.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6e16a083394339512ddf4220646112fd, NAME => 'testtb-testExportWithResetTtl,1,1732162976560.6e16a083394339512ddf4220646112fd.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T04:23:18,997 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 
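At this point DeleteTableProcedure pid=57 has archived both region directories (HFileArchiver moved the cf HFiles and recovered.edits under archive/data/default) and goes on to remove the region rows, the table state, and the ACL znode; the three snapshots are deleted a little further on by SnapshotManager. A hedged sketch of the corresponding public Admin calls; only the table and snapshot names are taken from this log, the rest is illustrative and not the test's code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class DropTableSketch {
      // Assumes an Admin handle obtained as in the previous sketch,
      // and that the table has already been disabled.
      static void dropTableAndSnapshots(Admin admin) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithResetTtl");

        // DeleteTableProcedure: archives the region directories, deletes the
        // region rows and table state from hbase:meta, and drops /hbase/acl/<table>.
        admin.deleteTable(table);

        // The SnapshotManager deletions logged shortly after this point.
        admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
        admin.deleteSnapshot("snaptb-testExportWithResetTtl");
        admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
      }
    }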
2024-11-21T04:23:18,997 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732162998997"}]},"ts":"9223372036854775807"} 2024-11-21T04:23:18,999 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-21T04:23:19,000 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=57, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-21T04:23:19,001 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 36 msec 2024-11-21T04:23:19,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T04:23:19,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T04:23:19,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T04:23:19,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T04:23:19,145 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-21T04:23:19,145 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-21T04:23:19,145 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-21T04:23:19,145 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-21T04:23:19,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T04:23:19,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T04:23:19,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T04:23:19,289 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-21T04:23:19,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:19,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:19,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:19,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:19,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-21T04:23:19,290 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-21T04:23:19,290 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-21T04:23:19,301 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-21T04:23:19,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-21T04:23:19,306 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-21T04:23:19,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-21T04:23:19,313 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-21T04:23:19,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-21T04:23:19,354 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=788 (was 777) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:42812 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (625843813) connection to localhost/127.0.0.1:42673 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 117662) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:45284 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.getContainerPid(ContainerLaunch.java:1062) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerCleanup.run(ContainerCleanup.java:119) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:43080 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42673 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165130082_1 at /127.0.0.1:45252 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2069 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) 
java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1165130082_1 at /127.0.0.1:43056 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46691 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=807 (was 810), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=737 (was 620) - SystemLoadAverage LEAK? 
-, ProcessCount=19 (was 19), AvailableMemoryMB=1496 (was 1978) 2024-11-21T04:23:19,355 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500 2024-11-21T04:23:19,377 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=788, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=737, ProcessCount=19, AvailableMemoryMB=1496 2024-11-21T04:23:19,377 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500 2024-11-21T04:23:19,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:23:19,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-21T04:23:19,381 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:23:19,382 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:23:19,382 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 58 2024-11-21T04:23:19,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-21T04:23:19,383 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:23:19,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741953_1129 (size=407) 2024-11-21T04:23:19,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741953_1129 (size=407) 2024-11-21T04:23:19,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741953_1129 (size=407) 2024-11-21T04:23:19,402 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7da54347ebc9f8163752e1690a82fef6, NAME => 'testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:19,403 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c5db7d0f20044794ceda0d8bf5a9bd32, NAME => 'testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:19,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741954_1130 (size=68) 2024-11-21T04:23:19,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741954_1130 (size=68) 2024-11-21T04:23:19,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741954_1130 (size=68) 2024-11-21T04:23:19,421 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:23:19,421 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing c5db7d0f20044794ceda0d8bf5a9bd32, disabling compactions & flushes 2024-11-21T04:23:19,421 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 2024-11-21T04:23:19,421 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 2024-11-21T04:23:19,422 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. after waiting 0 ms 2024-11-21T04:23:19,422 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 2024-11-21T04:23:19,422 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 
2024-11-21T04:23:19,422 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for c5db7d0f20044794ceda0d8bf5a9bd32: Waiting for close lock at 1732162999421Disabling compacts and flushes for region at 1732162999421Disabling writes for close at 1732162999422 (+1 ms)Writing region close event to WAL at 1732162999422Closed at 1732162999422 2024-11-21T04:23:19,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741955_1131 (size=68) 2024-11-21T04:23:19,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741955_1131 (size=68) 2024-11-21T04:23:19,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741955_1131 (size=68) 2024-11-21T04:23:19,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:23:19,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 7da54347ebc9f8163752e1690a82fef6, disabling compactions & flushes 2024-11-21T04:23:19,427 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:19,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:19,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. after waiting 0 ms 2024-11-21T04:23:19,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:19,427 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 
2024-11-21T04:23:19,427 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7da54347ebc9f8163752e1690a82fef6: Waiting for close lock at 1732162999427Disabling compacts and flushes for region at 1732162999427Disabling writes for close at 1732162999427Writing region close event to WAL at 1732162999427Closed at 1732162999427 2024-11-21T04:23:19,428 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:23:19,429 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732162999429"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732162999429"}]},"ts":"1732162999429"} 2024-11-21T04:23:19,429 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732162999429"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732162999429"}]},"ts":"1732162999429"} 2024-11-21T04:23:19,432 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T04:23:19,432 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:23:19,433 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162999433"}]},"ts":"1732162999433"} 2024-11-21T04:23:19,434 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-21T04:23:19,435 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:23:19,436 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:23:19,436 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:23:19,436 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:23:19,436 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:23:19,436 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:23:19,436 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:23:19,436 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:23:19,436 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:23:19,436 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:23:19,436 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:23:19,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7da54347ebc9f8163752e1690a82fef6, ASSIGN}, {pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c5db7d0f20044794ceda0d8bf5a9bd32, ASSIGN}] 2024-11-21T04:23:19,438 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c5db7d0f20044794ceda0d8bf5a9bd32, ASSIGN 2024-11-21T04:23:19,438 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7da54347ebc9f8163752e1690a82fef6, ASSIGN 2024-11-21T04:23:19,439 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c5db7d0f20044794ceda0d8bf5a9bd32, ASSIGN; state=OFFLINE, location=fdc0a393186c,34363,1732162938185; forceNewPlan=false, retain=false 2024-11-21T04:23:19,439 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7da54347ebc9f8163752e1690a82fef6, ASSIGN; state=OFFLINE, location=fdc0a393186c,39347,1732162938035; forceNewPlan=false, retain=false 2024-11-21T04:23:19,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-21T04:23:19,590 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
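The CREATE flow for testtb-testExportFileSystemState (pid=58) mirrors the shell-style request logged at 04:23:19,379: one column family 'cf' with VERSIONS => '1', REGION_REPLICATION => '1', and a single split point at '1', which is why two regions are initialized (STARTKEY '' to '1' and '1' to '') and then handed to the balancer for assignment. A hedged Java equivalent using the HBase 2.x descriptor builders, setting only those options and leaving the remaining attributes shown in the log at their defaults; again this is illustrative, not the test's actual code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      // Assumes an Admin handle obtained as in the earlier sketch.
      static void createExportTable(Admin admin) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
            .setRegionReplication(1)                 // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)                   // VERSIONS => '1'
                .build())
            .build();

        // One split key at '1' yields the two regions seen above:
        // ('' .. '1') and ('1' .. '').
        byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
        admin.createTable(desc, splitKeys);
      }
    }

Once the OpenRegionProcedures below complete, admin.getRegions(TableName.valueOf("testtb-testExportFileSystemState")) should list the same two encoded region names logged by the RegionOpenAndInit pool.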
2024-11-21T04:23:19,590 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=c5db7d0f20044794ceda0d8bf5a9bd32, regionState=OPENING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:23:19,590 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=7da54347ebc9f8163752e1690a82fef6, regionState=OPENING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:23:19,592 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7da54347ebc9f8163752e1690a82fef6, ASSIGN because future has completed 2024-11-21T04:23:19,592 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7da54347ebc9f8163752e1690a82fef6, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:23:19,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c5db7d0f20044794ceda0d8bf5a9bd32, ASSIGN because future has completed 2024-11-21T04:23:19,593 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:23:19,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-21T04:23:19,748 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:19,748 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7752): Opening region: {ENCODED => 7da54347ebc9f8163752e1690a82fef6, NAME => 'testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T04:23:19,749 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. service=AccessControlService 2024-11-21T04:23:19,749 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 2024-11-21T04:23:19,749 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7752): Opening region: {ENCODED => c5db7d0f20044794ceda0d8bf5a9bd32, NAME => 'testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T04:23:19,749 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T04:23:19,749 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. service=AccessControlService 2024-11-21T04:23:19,749 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:19,749 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:23:19,749 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:23:19,749 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7794): checking encryption for 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:19,749 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7797): checking classloading for 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:19,749 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:19,749 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:23:19,749 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7794): checking encryption for c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:19,750 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7797): checking classloading for c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:19,751 INFO [StoreOpener-c5db7d0f20044794ceda0d8bf5a9bd32-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:19,751 INFO [StoreOpener-7da54347ebc9f8163752e1690a82fef6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:19,752 INFO [StoreOpener-c5db7d0f20044794ceda0d8bf5a9bd32-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); 
files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c5db7d0f20044794ceda0d8bf5a9bd32 columnFamilyName cf 2024-11-21T04:23:19,752 INFO [StoreOpener-7da54347ebc9f8163752e1690a82fef6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7da54347ebc9f8163752e1690a82fef6 columnFamilyName cf 2024-11-21T04:23:19,753 DEBUG [StoreOpener-c5db7d0f20044794ceda0d8bf5a9bd32-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:23:19,753 DEBUG [StoreOpener-7da54347ebc9f8163752e1690a82fef6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:23:19,753 INFO [StoreOpener-c5db7d0f20044794ceda0d8bf5a9bd32-1 {}] regionserver.HStore(327): Store=c5db7d0f20044794ceda0d8bf5a9bd32/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:23:19,754 INFO [StoreOpener-7da54347ebc9f8163752e1690a82fef6-1 {}] regionserver.HStore(327): Store=7da54347ebc9f8163752e1690a82fef6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:23:19,754 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1038): replaying wal for 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:19,754 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1038): replaying wal for c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:19,755 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:19,755 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:19,756 DEBUG 
[RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:19,756 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:19,757 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1048): stopping wal replay for c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:19,757 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1048): stopping wal replay for 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:19,757 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1060): Cleaning up temporary data for c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:19,757 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1060): Cleaning up temporary data for 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:19,759 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1093): writing seq id for c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:19,760 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1093): writing seq id for 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:19,762 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:23:19,763 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1114): Opened c5db7d0f20044794ceda0d8bf5a9bd32; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64481328, jitterRate=-0.039153337478637695}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:23:19,763 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:19,764 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1006): Region open journal for c5db7d0f20044794ceda0d8bf5a9bd32: Running coprocessor pre-open hook at 1732162999750Writing region info on filesystem at 1732162999750Initializing all the Stores at 1732162999750Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 
B (64KB)'} at 1732162999750Cleaning up temporary data from old regions at 1732162999757 (+7 ms)Running coprocessor post-open hooks at 1732162999763 (+6 ms)Region opened successfully at 1732162999764 (+1 ms) 2024-11-21T04:23:19,765 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:23:19,765 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32., pid=62, masterSystemTime=1732162999745 2024-11-21T04:23:19,765 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1114): Opened 7da54347ebc9f8163752e1690a82fef6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65516640, jitterRate=-0.02372598648071289}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:23:19,766 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:19,766 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1006): Region open journal for 7da54347ebc9f8163752e1690a82fef6: Running coprocessor pre-open hook at 1732162999749Writing region info on filesystem at 1732162999749Initializing all the Stores at 1732162999750 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732162999750Cleaning up temporary data from old regions at 1732162999757 (+7 ms)Running coprocessor post-open hooks at 1732162999766 (+9 ms)Region opened successfully at 1732162999766 2024-11-21T04:23:19,767 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6., pid=61, masterSystemTime=1732162999744 2024-11-21T04:23:19,767 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 2024-11-21T04:23:19,767 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 
2024-11-21T04:23:19,768 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=c5db7d0f20044794ceda0d8bf5a9bd32, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:23:19,769 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:19,769 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:19,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:23:19,772 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=7da54347ebc9f8163752e1690a82fef6, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:23:19,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7da54347ebc9f8163752e1690a82fef6, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:23:19,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=62, resume processing ppid=60 2024-11-21T04:23:19,777 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, ppid=60, state=SUCCESS, hasLock=false; OpenRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32, server=fdc0a393186c,34363,1732162938185 in 181 msec 2024-11-21T04:23:19,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-11-21T04:23:19,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; OpenRegionProcedure 7da54347ebc9f8163752e1690a82fef6, server=fdc0a393186c,39347,1732162938035 in 184 msec 2024-11-21T04:23:19,778 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c5db7d0f20044794ceda0d8bf5a9bd32, ASSIGN in 340 msec 2024-11-21T04:23:19,780 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=58 2024-11-21T04:23:19,780 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7da54347ebc9f8163752e1690a82fef6, ASSIGN in 341 msec 2024-11-21T04:23:19,781 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:23:19,781 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732162999781"}]},"ts":"1732162999781"} 2024-11-21T04:23:19,783 INFO 
[PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-21T04:23:19,784 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:23:19,784 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-21T04:23:19,788 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T04:23:19,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:19,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:19,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:19,811 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:19,820 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:19,820 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:19,821 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:19,822 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:19,828 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 442 msec 2024-11-21T04:23:20,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-21T04:23:20,013 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-21T04:23:20,013 DEBUG [Time-limited 
test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-11-21T04:23:20,013 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:23:20,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-11-21T04:23:20,018 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:23:20,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 2024-11-21T04:23:20,018 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T04:23:20,022 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T04:23:20,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163000022 (current time:1732163000022). 2024-11-21T04:23:20,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:23:20,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-21T04:23:20,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:23:20,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@747cc2b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:23:20,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:23:20,026 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:23:20,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:23:20,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:23:20,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67520c2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:23:20,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:23:20,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,028 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60964, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:23:20,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59911048, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:23:20,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:23:20,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:20,031 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34238, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:20,033 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
2024-11-21T04:23:20,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:23:20,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,033 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:23:20,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@363a5db8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:23:20,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:23:20,039 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:23:20,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:23:20,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:23:20,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9234ca5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:23:20,040 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:23:20,040 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,041 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60988, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:23:20,041 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27207289, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:23:20,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:23:20,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:20,044 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34254, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:20,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:23:20,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:20,048 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52336, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:20,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
2024-11-21T04:23:20,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:23:20,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T04:23:20,050 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:23:20,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
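The snapshot request validated above ({ ss=emptySnaptb0-testExportFileSystemState ... type=FLUSH }) is executed by the master-side SnapshotProcedure stored as pid=63 below. For reference only, a hypothetical client-side equivalent using the public Admin API is sketched here; the snapshot and table names are taken from the log, everything else (class, configuration source) is an illustrative assumption rather than the test's actual code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Requests a flush-type snapshot (matching the type=FLUSH request above) and blocks
          // until the master procedure walks SNAPSHOT_PREPARE .. SNAPSHOT_COMPLETE_SNAPSHOT.
          admin.snapshot("emptySnaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"));
        }
      }
    }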
2024-11-21T04:23:20,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T04:23:20,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-21T04:23:20,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-21T04:23:20,053 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:23:20,054 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:23:20,057 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:23:20,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741956_1132 (size=170) 2024-11-21T04:23:20,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741956_1132 (size=170) 2024-11-21T04:23:20,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741956_1132 (size=170) 2024-11-21T04:23:20,065 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:23:20,065 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7da54347ebc9f8163752e1690a82fef6}, {pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32}] 2024-11-21T04:23:20,067 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:20,067 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:20,162 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-21T04:23:20,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=64 2024-11-21T04:23:20,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=65 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.HRegion(2603): Flush status journal for c5db7d0f20044794ceda0d8bf5a9bd32: 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.HRegion(2603): Flush status journal for 7da54347ebc9f8163752e1690a82fef6: 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. for emptySnaptb0-testExportFileSystemState completed. 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. for emptySnaptb0-testExportFileSystemState completed. 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:23:20,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:23:20,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741958_1134 (size=71) 2024-11-21T04:23:20,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741958_1134 (size=71) 2024-11-21T04:23:20,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:20,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-21T04:23:20,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=64 2024-11-21T04:23:20,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:20,233 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:20,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741958_1134 (size=71) 2024-11-21T04:23:20,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7da54347ebc9f8163752e1690a82fef6 in 169 msec 2024-11-21T04:23:20,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741957_1133 (size=71) 2024-11-21T04:23:20,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741957_1133 (size=71) 2024-11-21T04:23:20,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741957_1133 (size=71) 2024-11-21T04:23:20,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 
2024-11-21T04:23:20,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=65 2024-11-21T04:23:20,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=65 2024-11-21T04:23:20,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:20,239 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:20,242 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-11-21T04:23:20,242 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32 in 175 msec 2024-11-21T04:23:20,242 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:23:20,243 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:23:20,244 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:23:20,244 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-21T04:23:20,245 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-21T04:23:20,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741959_1135 (size=552) 2024-11-21T04:23:20,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741959_1135 (size=552) 2024-11-21T04:23:20,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741959_1135 (size=552) 2024-11-21T04:23:20,266 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 
2024-11-21T04:23:20,272 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:23:20,273 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-21T04:23:20,274 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:23:20,274 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-21T04:23:20,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 223 msec 2024-11-21T04:23:20,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-21T04:23:20,372 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-21T04:23:20,378 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='079d3919e4664e708e7293b912319ccfc', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6., hostname=fdc0a393186c,39347,1732162938035, seqNum=2] 2024-11-21T04:23:20,380 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='1035ddc2d9b3cf71c721bd4209bc55fbc', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:23:20,382 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='2e2a16ba403d558bcf566b72328fe30de', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:23:20,383 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='3ef681d2fe4ddbb2d26c39a79a1537665', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 
2024-11-21T04:23:20,384 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='408e3c47148fbce9ee8658db4aa236229', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:23:20,396 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39347 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:23:20,398 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:23:20,401 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T04:23:20,405 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-21T04:23:20,406 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:20,406 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:23:20,409 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T04:23:20,423 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T04:23:20,432 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T04:23:20,436 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T04:23:20,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163000436 (current time:1732163000436). 
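The two HRegion(8528) warnings above ("writing data to region ... with WAL disabled") indicate the test populates both regions with puts that skip the write-ahead log before taking the second snapshot. A minimal sketch of a client write that produces that server-side warning is shown below, assuming the public client API; the row key, qualifier, and value are hypothetical placeholders, not values from the test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
          Put put = new Put(Bytes.toBytes("row-0"))   // hypothetical row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL trades durability for speed; the regionserver logs the
          // "Data may be lost in the event of a crash" warning seen above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }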
2024-11-21T04:23:20,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:23:20,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-21T04:23:20,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:23:20,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25a5c2ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:23:20,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:23:20,440 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:23:20,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:23:20,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:23:20,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@679a7d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:23:20,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:23:20,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,441 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32776, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:23:20,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16cb4827, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:23:20,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:23:20,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:20,445 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34260, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:20,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:23:20,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:23:20,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,447 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
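The AsyncConnectionImpl close() stack trace above comes from a short-lived connection the master opens inside SnapshotDescriptionUtils.isSecurityAvailable() while validating the snapshot request, and closes again immediately. A rough client-side analogue of that open-check-close pattern is sketched below; the concrete check (whether the hbase:acl table exists) is an assumption about what the validation amounts to, not a copy of the internal code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AclTableCheckSketch {
      public static void main(String[] args) throws Exception {
        // Short-lived connection, closed right after the check -- the same open/close
        // pattern that produces the close() call stacks and "Stopping rpc client" lines above.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          boolean securityAvailable = admin.tableExists(TableName.valueOf("hbase:acl"));
          System.out.println("hbase:acl present: " + securityAvailable);
        }
      }
    }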
2024-11-21T04:23:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ee3388e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:23:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:23:20,449 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:23:20,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:23:20,449 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:23:20,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66235252, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:23:20,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:23:20,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,451 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32798, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:23:20,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18e1da94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:20,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:23:20,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:23:20,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:20,457 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34276, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T04:23:20,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:23:20,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:20,461 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52338, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:20,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:23:20,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:23:20,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:20,462 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:23:20,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T04:23:20,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:23:20,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T04:23:20,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-21T04:23:20,465 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:23:20,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-21T04:23:20,466 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:23:20,472 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:23:20,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741960_1136 (size=165) 2024-11-21T04:23:20,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741960_1136 (size=165) 2024-11-21T04:23:20,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741960_1136 (size=165) 2024-11-21T04:23:20,483 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 
} execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:23:20,483 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7da54347ebc9f8163752e1690a82fef6}, {pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32}] 2024-11-21T04:23:20,484 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:20,484 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:20,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-21T04:23:20,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=68 2024-11-21T04:23:20,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=67 2024-11-21T04:23:20,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:20,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 
2024-11-21T04:23:20,637 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2902): Flushing 7da54347ebc9f8163752e1690a82fef6 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-21T04:23:20,637 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2902): Flushing c5db7d0f20044794ceda0d8bf5a9bd32 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-21T04:23:20,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/.tmp/cf/10c64494d14e41dba25873a10d915dfe is 71, key is 13b5c50c867170b63984f875ad3e49a5/cf:q/1732163000398/Put/seqid=0 2024-11-21T04:23:20,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/.tmp/cf/68ed098ffdd643cab92b6d7f149ecc1d is 71, key is 09c55b1ed46aaaed264200055e9c34ae/cf:q/1732163000396/Put/seqid=0 2024-11-21T04:23:20,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741961_1137 (size=8326) 2024-11-21T04:23:20,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741961_1137 (size=8326) 2024-11-21T04:23:20,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741961_1137 (size=8326) 2024-11-21T04:23:20,693 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/.tmp/cf/10c64494d14e41dba25873a10d915dfe 2024-11-21T04:23:20,702 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/.tmp/cf/10c64494d14e41dba25873a10d915dfe as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/cf/10c64494d14e41dba25873a10d915dfe 2024-11-21T04:23:20,716 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/cf/10c64494d14e41dba25873a10d915dfe, entries=47, sequenceid=6, filesize=8.1 K 2024-11-21T04:23:20,717 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(3140): Finished flush of 
dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for c5db7d0f20044794ceda0d8bf5a9bd32 in 81ms, sequenceid=6, compaction requested=false 2024-11-21T04:23:20,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-21T04:23:20,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2603): Flush status journal for c5db7d0f20044794ceda0d8bf5a9bd32: 2024-11-21T04:23:20,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. for snaptb0-testExportFileSystemState completed. 2024-11-21T04:23:20,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-21T04:23:20,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:23:20,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/cf/10c64494d14e41dba25873a10d915dfe] hfiles 2024-11-21T04:23:20,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/cf/10c64494d14e41dba25873a10d915dfe for snapshot=snaptb0-testExportFileSystemState 2024-11-21T04:23:20,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741962_1138 (size=5288) 2024-11-21T04:23:20,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741962_1138 (size=5288) 2024-11-21T04:23:20,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741962_1138 (size=5288) 2024-11-21T04:23:20,735 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/.tmp/cf/68ed098ffdd643cab92b6d7f149ecc1d 2024-11-21T04:23:20,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/.tmp/cf/68ed098ffdd643cab92b6d7f149ecc1d as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/cf/68ed098ffdd643cab92b6d7f149ecc1d 2024-11-21T04:23:20,756 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/cf/68ed098ffdd643cab92b6d7f149ecc1d, entries=3, sequenceid=6, filesize=5.2 K 2024-11-21T04:23:20,757 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 7da54347ebc9f8163752e1690a82fef6 in 121ms, sequenceid=6, compaction requested=false 2024-11-21T04:23:20,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2603): Flush status journal for 7da54347ebc9f8163752e1690a82fef6: 2024-11-21T04:23:20,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. for snaptb0-testExportFileSystemState completed. 2024-11-21T04:23:20,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-21T04:23:20,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:23:20,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/cf/68ed098ffdd643cab92b6d7f149ecc1d] hfiles 2024-11-21T04:23:20,758 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/cf/68ed098ffdd643cab92b6d7f149ecc1d for snapshot=snaptb0-testExportFileSystemState 2024-11-21T04:23:20,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741963_1139 (size=110) 2024-11-21T04:23:20,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741963_1139 (size=110) 2024-11-21T04:23:20,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741963_1139 (size=110) 2024-11-21T04:23:20,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 
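The per-region flushes above (DefaultStoreFlusher, then HStore "Added ... entries=47" / "entries=3") are forced by the FLUSH snapshot type before the SnapshotManifest records the resulting HFiles. An alternative pattern, not what this test does, is to flush explicitly from the client and then take a SKIPFLUSH snapshot; a minimal sketch with a hypothetical snapshot name:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class FlushThenSnapshotSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Force memstores to HFiles first, then snapshot without triggering another flush.
          admin.flush(table);
          admin.snapshot("snaptb0-example", table, SnapshotType.SKIPFLUSH);
        }
      }
    }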
2024-11-21T04:23:20,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-21T04:23:20,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=68 2024-11-21T04:23:20,764 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:20,764 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:20,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32 in 282 msec 2024-11-21T04:23:20,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-21T04:23:20,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741964_1140 (size=110) 2024-11-21T04:23:20,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741964_1140 (size=110) 2024-11-21T04:23:20,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741964_1140 (size=110) 2024-11-21T04:23:20,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 
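The repeated "Checking to see if procedure is done pid=66" entries are the client polling the master until the snapshot procedure finishes; the blocking Admin.snapshot() call hides that loop. The polling can also be made explicit with the async variant. A rough sketch using the 2.x-style blocking Admin API (signatures may differ slightly in the 4.0.0-alpha build this test runs against; the snapshot name is hypothetical):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotPollSketch {
      public static void main(String[] args) throws Exception {
        SnapshotDescription desc = new SnapshotDescription(
            "snaptb0-example",
            TableName.valueOf("testtb-testExportFileSystemState"),
            SnapshotType.FLUSH);
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.snapshotAsync(desc);                  // submit the SnapshotProcedure, return at once
          while (!admin.isSnapshotFinished(desc)) {   // client-side polling, like the pid=66 checks
            Thread.sleep(200);
          }
        }
      }
    }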
2024-11-21T04:23:20,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=67 2024-11-21T04:23:20,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=67 2024-11-21T04:23:20,801 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:20,801 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:20,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=66 2024-11-21T04:23:20,804 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:23:20,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7da54347ebc9f8163752e1690a82fef6 in 319 msec 2024-11-21T04:23:20,805 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:23:20,805 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:23:20,805 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-21T04:23:20,806 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-21T04:23:20,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741965_1141 (size=630) 2024-11-21T04:23:20,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741965_1141 (size=630) 2024-11-21T04:23:20,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741965_1141 (size=630) 2024-11-21T04:23:20,830 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:23:20,838 INFO [PEWorker-5 {}] 
procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:23:20,839 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-21T04:23:20,843 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:23:20,843 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-21T04:23:20,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 380 msec 2024-11-21T04:23:21,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-21T04:23:21,093 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-21T04:23:21,094 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163001094 2024-11-21T04:23:21,094 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44067, tgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163001094, rawTgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163001094, srcFsUri=hdfs://localhost:44067, srcDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:21,126 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44067, inputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:21,126 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163001094, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163001094/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-21T04:23:21,128 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's 
expiration status and integrity. 2024-11-21T04:23:21,133 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163001094/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-21T04:23:21,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741967_1143 (size=630) 2024-11-21T04:23:21,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741967_1143 (size=630) 2024-11-21T04:23:21,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741967_1143 (size=630) 2024-11-21T04:23:21,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741966_1142 (size=165) 2024-11-21T04:23:21,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741966_1142 (size=165) 2024-11-21T04:23:21,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741966_1142 (size=165) 2024-11-21T04:23:21,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:21,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:21,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:21,455 DEBUG [master/fdc0a393186c:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region c5db7d0f20044794ceda0d8bf5a9bd32 changed from -1.0 to 0.0, refreshing cache 2024-11-21T04:23:21,456 DEBUG [master/fdc0a393186c:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7da54347ebc9f8163752e1690a82fef6 changed from -1.0 to 0.0, refreshing cache 2024-11-21T04:23:21,456 DEBUG [master/fdc0a393186c:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 796b8a6daae5bbdc223e78db15304576 changed from -1.0 to 0.0, refreshing cache 2024-11-21T04:23:22,082 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0002/container_1732162945431_0002_01_000002/launch_container.sh] 2024-11-21T04:23:22,082 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0002/container_1732162945431_0002_01_000002/container_tokens] 2024-11-21T04:23:22,082 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0002/container_1732162945431_0002_01_000002/sysfs] 2024-11-21T04:23:22,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-2143213717339303361.jar 2024-11-21T04:23:22,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:22,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:22,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-17679607100105664211.jar 2024-11-21T04:23:22,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:22,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:22,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:22,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:22,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:22,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:22,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T04:23:22,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T04:23:22,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T04:23:22,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T04:23:22,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T04:23:22,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T04:23:22,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T04:23:22,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T04:23:22,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T04:23:22,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T04:23:22,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T04:23:22,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:22,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:22,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:22,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:22,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:22,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:22,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:22,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741968_1144 (size=131440) 2024-11-21T04:23:22,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741968_1144 (size=131440) 2024-11-21T04:23:22,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741968_1144 (size=131440) 2024-11-21T04:23:22,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741969_1145 (size=4188619) 2024-11-21T04:23:22,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741969_1145 (size=4188619) 2024-11-21T04:23:22,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741969_1145 (size=4188619) 2024-11-21T04:23:22,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741970_1146 (size=1323991) 
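The TableMapReduceUtil "For class ..., using jar ..." entries above show ExportSnapshot collecting the dependency jars for its MapReduce job, and the following addStoredBlock lines are those jars being staged into HDFS. Outside the test harness the same export is usually launched as a Hadoop Tool; a minimal sketch, with the destination URI and mapper count as placeholders and the option names taken from the standard ExportSnapshot usage (worth confirming against the running version's help output):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copy the completed snapshot (manifest plus referenced HFiles) to another HDFS location.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://namenode:8020/backup/hbase",   // placeholder destination
            "-mappers", "2"
        });
        System.exit(rc);
      }
    }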
2024-11-21T04:23:22,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741970_1146 (size=1323991) 2024-11-21T04:23:22,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741970_1146 (size=1323991) 2024-11-21T04:23:22,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741971_1147 (size=6424740) 2024-11-21T04:23:22,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741971_1147 (size=6424740) 2024-11-21T04:23:22,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741971_1147 (size=6424740) 2024-11-21T04:23:22,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741972_1148 (size=903734) 2024-11-21T04:23:22,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741972_1148 (size=903734) 2024-11-21T04:23:22,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741972_1148 (size=903734) 2024-11-21T04:23:22,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741973_1149 (size=8360083) 2024-11-21T04:23:22,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741973_1149 (size=8360083) 2024-11-21T04:23:22,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741973_1149 (size=8360083) 2024-11-21T04:23:22,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741974_1150 (size=440656) 2024-11-21T04:23:22,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741974_1150 (size=440656) 2024-11-21T04:23:22,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741974_1150 (size=440656) 2024-11-21T04:23:22,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741975_1151 (size=1877034) 2024-11-21T04:23:22,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741975_1151 (size=1877034) 2024-11-21T04:23:22,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741975_1151 (size=1877034) 2024-11-21T04:23:22,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741976_1152 (size=77835) 2024-11-21T04:23:22,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741976_1152 (size=77835) 2024-11-21T04:23:22,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741976_1152 
(size=77835) 2024-11-21T04:23:22,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741977_1153 (size=30949) 2024-11-21T04:23:22,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741977_1153 (size=30949) 2024-11-21T04:23:22,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741977_1153 (size=30949) 2024-11-21T04:23:22,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741978_1154 (size=1597327) 2024-11-21T04:23:22,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741978_1154 (size=1597327) 2024-11-21T04:23:22,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741978_1154 (size=1597327) 2024-11-21T04:23:22,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741979_1155 (size=4695811) 2024-11-21T04:23:22,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741979_1155 (size=4695811) 2024-11-21T04:23:22,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741979_1155 (size=4695811) 2024-11-21T04:23:22,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741980_1156 (size=232957) 2024-11-21T04:23:22,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741980_1156 (size=232957) 2024-11-21T04:23:22,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741980_1156 (size=232957) 2024-11-21T04:23:22,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741981_1157 (size=127628) 2024-11-21T04:23:22,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741981_1157 (size=127628) 2024-11-21T04:23:22,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741981_1157 (size=127628) 2024-11-21T04:23:22,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741982_1158 (size=20406) 2024-11-21T04:23:22,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741982_1158 (size=20406) 2024-11-21T04:23:22,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741982_1158 (size=20406) 2024-11-21T04:23:22,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741983_1159 (size=5175431) 2024-11-21T04:23:22,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to 
blk_1073741983_1159 (size=5175431) 2024-11-21T04:23:22,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741983_1159 (size=5175431) 2024-11-21T04:23:22,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741984_1160 (size=217634) 2024-11-21T04:23:22,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741984_1160 (size=217634) 2024-11-21T04:23:22,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741984_1160 (size=217634) 2024-11-21T04:23:22,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741985_1161 (size=1832290) 2024-11-21T04:23:22,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741985_1161 (size=1832290) 2024-11-21T04:23:22,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741985_1161 (size=1832290) 2024-11-21T04:23:22,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741986_1162 (size=322274) 2024-11-21T04:23:22,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741986_1162 (size=322274) 2024-11-21T04:23:22,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741986_1162 (size=322274) 2024-11-21T04:23:22,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741987_1163 (size=503880) 2024-11-21T04:23:22,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741987_1163 (size=503880) 2024-11-21T04:23:22,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741987_1163 (size=503880) 2024-11-21T04:23:22,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741988_1164 (size=29229) 2024-11-21T04:23:22,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741988_1164 (size=29229) 2024-11-21T04:23:22,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741988_1164 (size=29229) 2024-11-21T04:23:22,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741989_1165 (size=24096) 2024-11-21T04:23:22,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741989_1165 (size=24096) 2024-11-21T04:23:22,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741989_1165 (size=24096) 2024-11-21T04:23:22,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is 
added to blk_1073741990_1166 (size=111872) 2024-11-21T04:23:22,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741990_1166 (size=111872) 2024-11-21T04:23:22,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741990_1166 (size=111872) 2024-11-21T04:23:22,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741991_1167 (size=45609) 2024-11-21T04:23:22,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741991_1167 (size=45609) 2024-11-21T04:23:22,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741991_1167 (size=45609) 2024-11-21T04:23:22,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741992_1168 (size=136454) 2024-11-21T04:23:22,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741992_1168 (size=136454) 2024-11-21T04:23:22,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741992_1168 (size=136454) 2024-11-21T04:23:22,819 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-21T04:23:22,821 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-21T04:23:22,823 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T04:23:22,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741993_1169 (size=344) 2024-11-21T04:23:22,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741993_1169 (size=344) 2024-11-21T04:23:22,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741993_1169 (size=344) 2024-11-21T04:23:22,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741994_1170 (size=15) 2024-11-21T04:23:22,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741994_1170 (size=15) 2024-11-21T04:23:22,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741994_1170 (size=15) 2024-11-21T04:23:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741995_1171 (size=303740) 2024-11-21T04:23:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741995_1171 (size=303740) 2024-11-21T04:23:22,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741995_1171 (size=303740) 2024-11-21T04:23:23,114 WARN [SchedulerEventDispatcher:Event Processor {}] 
capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T04:23:23,114 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T04:23:23,117 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0002_000001 (auth:SIMPLE) from 127.0.0.1:32892 2024-11-21T04:23:23,125 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0002/container_1732162945431_0002_01_000001/launch_container.sh] 2024-11-21T04:23:23,125 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0002/container_1732162945431_0002_01_000001/container_tokens] 2024-11-21T04:23:23,125 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0002/container_1732162945431_0002_01_000001/sysfs] 2024-11-21T04:23:23,825 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0003_000001 (auth:SIMPLE) from 127.0.0.1:49910 2024-11-21T04:23:23,983 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:23:27,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-21T04:23:27,560 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-21T04:23:27,561 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-21T04:23:27,561 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-21T04:23:29,532 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0003_000001 (auth:SIMPLE) from 127.0.0.1:34412 2024-11-21T04:23:29,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741996_1172 
(size=349390) 2024-11-21T04:23:29,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741996_1172 (size=349390) 2024-11-21T04:23:29,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741996_1172 (size=349390) 2024-11-21T04:23:31,834 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0003_000001 (auth:SIMPLE) from 127.0.0.1:49926 2024-11-21T04:23:33,064 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:23:34,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741997_1173 (size=8326) 2024-11-21T04:23:34,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741997_1173 (size=8326) 2024-11-21T04:23:34,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741997_1173 (size=8326) 2024-11-21T04:23:35,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741998_1174 (size=5288) 2024-11-21T04:23:35,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741998_1174 (size=5288) 2024-11-21T04:23:35,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741998_1174 (size=5288) 2024-11-21T04:23:35,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741999_1175 (size=17422) 2024-11-21T04:23:35,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741999_1175 (size=17422) 2024-11-21T04:23:35,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741999_1175 (size=17422) 2024-11-21T04:23:35,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742000_1176 (size=465) 2024-11-21T04:23:35,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742000_1176 (size=465) 2024-11-21T04:23:35,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742000_1176 (size=465) 2024-11-21T04:23:35,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742001_1177 (size=17422) 2024-11-21T04:23:35,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742001_1177 (size=17422) 2024-11-21T04:23:35,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742001_1177 (size=17422) 2024-11-21T04:23:35,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742002_1178 
(size=349390) 2024-11-21T04:23:35,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742002_1178 (size=349390) 2024-11-21T04:23:35,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742002_1178 (size=349390) 2024-11-21T04:23:35,355 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0003_000001 (auth:SIMPLE) from 127.0.0.1:52632 2024-11-21T04:23:37,045 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T04:23:37,047 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-21T04:23:37,055 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemState 2024-11-21T04:23:37,055 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T04:23:37,056 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T04:23:37,056 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-21T04:23:37,056 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-21T04:23:37,056 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-21T04:23:37,056 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163001094/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163001094/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-21T04:23:37,057 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163001094/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-21T04:23:37,057 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163001094/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-21T04:23:37,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-11-21T04:23:37,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=69, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; 
DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-21T04:23:37,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-21T04:23:37,068 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163017068"}]},"ts":"1732163017068"} 2024-11-21T04:23:37,070 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-21T04:23:37,070 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-21T04:23:37,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-21T04:23:37,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7da54347ebc9f8163752e1690a82fef6, UNASSIGN}, {pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c5db7d0f20044794ceda0d8bf5a9bd32, UNASSIGN}] 2024-11-21T04:23:37,074 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c5db7d0f20044794ceda0d8bf5a9bd32, UNASSIGN 2024-11-21T04:23:37,074 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7da54347ebc9f8163752e1690a82fef6, UNASSIGN 2024-11-21T04:23:37,075 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=7da54347ebc9f8163752e1690a82fef6, regionState=CLOSING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:23:37,075 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=c5db7d0f20044794ceda0d8bf5a9bd32, regionState=CLOSING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:23:37,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7da54347ebc9f8163752e1690a82fef6, UNASSIGN because future has completed 2024-11-21T04:23:37,081 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:23:37,081 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7da54347ebc9f8163752e1690a82fef6, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:23:37,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c5db7d0f20044794ceda0d8bf5a9bd32, UNASSIGN because future has completed 2024-11-21T04:23:37,082 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:23:37,082 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:23:37,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-21T04:23:37,234 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(122): Close 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:37,234 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:23:37,235 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1722): Closing 7da54347ebc9f8163752e1690a82fef6, disabling compactions & flushes 2024-11-21T04:23:37,235 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:37,235 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:37,235 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. after waiting 0 ms 2024-11-21T04:23:37,235 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:37,236 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(122): Close c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:37,236 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:23:37,236 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1722): Closing c5db7d0f20044794ceda0d8bf5a9bd32, disabling compactions & flushes 2024-11-21T04:23:37,236 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 2024-11-21T04:23:37,236 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 
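The "Export Completed: snaptb0-testExportFileSystemState" entries above come from the ExportSnapshot tool, which copies a snapshot's manifest and referenced HFiles to a target filesystem through a MapReduce job (the export split and block-allocation entries earlier in this section are that job writing its output). A minimal sketch of how such an export is typically launched, assuming the tool's documented -snapshot/-copy-to/-mappers options and an illustrative destination URI rather than values taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to the documented CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportFileSystemState -copy-to hdfs://dest:8020/hbase -mappers 4
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState", // snapshot name seen in this log
        "-copy-to", "hdfs://dest:8020/hbase",             // illustrative destination root, not from this run
        "-mappers", "4"                                   // illustrative parallelism
    });
    System.exit(rc);
  }
}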
2024-11-21T04:23:37,236 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. after waiting 0 ms 2024-11-21T04:23:37,236 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 2024-11-21T04:23:37,243 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:23:37,244 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:23:37,244 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:23:37,244 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6. 2024-11-21T04:23:37,244 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1676): Region close journal for 7da54347ebc9f8163752e1690a82fef6: Waiting for close lock at 1732163017234Running coprocessor pre-close hooks at 1732163017234Disabling compacts and flushes for region at 1732163017234Disabling writes for close at 1732163017235 (+1 ms)Writing region close event to WAL at 1732163017235Running coprocessor post-close hooks at 1732163017244 (+9 ms)Closed at 1732163017244 2024-11-21T04:23:37,244 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:23:37,244 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32. 
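The entries around this point trace the master-side teardown of testtb-testExportFileSystemState: DisableTableProcedure (pid=69) closes both regions, DeleteTableProcedure (pid=75) archives the HFiles and removes the table from hbase:meta, and the two snapshots are then deleted. From the client side this corresponds to a handful of Admin calls; the sketch below is illustrative only (connection setup assumed, not taken from the test source):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TableTeardownSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table);   // master runs DisableTableProcedure (pid=69 in this log)
      admin.deleteTable(table);    // master runs DeleteTableProcedure (pid=75 in this log)
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");
    }
  }
}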
2024-11-21T04:23:37,245 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1676): Region close journal for c5db7d0f20044794ceda0d8bf5a9bd32: Waiting for close lock at 1732163017236Running coprocessor pre-close hooks at 1732163017236Disabling compacts and flushes for region at 1732163017236Disabling writes for close at 1732163017236Writing region close event to WAL at 1732163017237 (+1 ms)Running coprocessor post-close hooks at 1732163017244 (+7 ms)Closed at 1732163017244 2024-11-21T04:23:37,246 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(157): Closed 7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:37,247 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=7da54347ebc9f8163752e1690a82fef6, regionState=CLOSED 2024-11-21T04:23:37,247 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(157): Closed c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:37,248 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=c5db7d0f20044794ceda0d8bf5a9bd32, regionState=CLOSED 2024-11-21T04:23:37,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=73, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7da54347ebc9f8163752e1690a82fef6, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:23:37,250 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=74, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:23:37,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-11-21T04:23:37,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; CloseRegionProcedure 7da54347ebc9f8163752e1690a82fef6, server=fdc0a393186c,39347,1732162938035 in 169 msec 2024-11-21T04:23:37,253 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=74, resume processing ppid=72 2024-11-21T04:23:37,253 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=7da54347ebc9f8163752e1690a82fef6, UNASSIGN in 179 msec 2024-11-21T04:23:37,253 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, ppid=72, state=SUCCESS, hasLock=false; CloseRegionProcedure c5db7d0f20044794ceda0d8bf5a9bd32, server=fdc0a393186c,34363,1732162938185 in 169 msec 2024-11-21T04:23:37,255 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=72, resume processing ppid=70 2024-11-21T04:23:37,255 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c5db7d0f20044794ceda0d8bf5a9bd32, UNASSIGN in 180 msec 2024-11-21T04:23:37,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=69 2024-11-21T04:23:37,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, 
ppid=69, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 185 msec 2024-11-21T04:23:37,258 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163017258"}]},"ts":"1732163017258"} 2024-11-21T04:23:37,260 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-21T04:23:37,260 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-21T04:23:37,262 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 196 msec 2024-11-21T04:23:37,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-21T04:23:37,383 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-21T04:23:37,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-11-21T04:23:37,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T04:23:37,388 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T04:23:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-21T04:23:37,389 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=75, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T04:23:37,393 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-21T04:23:37,394 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:37,394 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:37,396 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/recovered.edits] 2024-11-21T04:23:37,396 DEBUG 
[HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/recovered.edits] 2024-11-21T04:23:37,400 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/cf/68ed098ffdd643cab92b6d7f149ecc1d to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/cf/68ed098ffdd643cab92b6d7f149ecc1d 2024-11-21T04:23:37,400 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/cf/10c64494d14e41dba25873a10d915dfe to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/cf/10c64494d14e41dba25873a10d915dfe 2024-11-21T04:23:37,404 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32/recovered.edits/9.seqid 2024-11-21T04:23:37,404 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6/recovered.edits/9.seqid 2024-11-21T04:23:37,405 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/c5db7d0f20044794ceda0d8bf5a9bd32 2024-11-21T04:23:37,405 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemState/7da54347ebc9f8163752e1690a82fef6 2024-11-21T04:23:37,405 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-21T04:23:37,408 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=75, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T04:23:37,412 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-21T04:23:37,415 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): 
Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-21T04:23:37,418 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=75, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T04:23:37,418 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-11-21T04:23:37,419 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163017418"}]},"ts":"9223372036854775807"} 2024-11-21T04:23:37,419 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163017418"}]},"ts":"9223372036854775807"} 2024-11-21T04:23:37,425 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T04:23:37,425 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7da54347ebc9f8163752e1690a82fef6, NAME => 'testtb-testExportFileSystemState,,1732162999378.7da54347ebc9f8163752e1690a82fef6.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c5db7d0f20044794ceda0d8bf5a9bd32, NAME => 'testtb-testExportFileSystemState,1,1732162999378.c5db7d0f20044794ceda0d8bf5a9bd32.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T04:23:37,425 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-11-21T04:23:37,425 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732163017425"}]},"ts":"9223372036854775807"} 2024-11-21T04:23:37,429 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-21T04:23:37,430 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=75, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-21T04:23:37,432 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 46 msec 2024-11-21T04:23:37,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T04:23:37,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T04:23:37,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T04:23:37,474 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemState with data PBUF 2024-11-21T04:23:37,474 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-21T04:23:37,474 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-21T04:23:37,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T04:23:37,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T04:23:37,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T04:23:37,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:37,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:37,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:37,481 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data null 2024-11-21T04:23:37,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-21T04:23:37,482 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-21T04:23:37,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-21T04:23:37,483 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-21T04:23:37,483 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-21T04:23:37,492 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(838): 
Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-21T04:23:37,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-21T04:23:37,497 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-21T04:23:37,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-21T04:23:37,530 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=791 (was 788) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 120382) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_661095320_1 at /127.0.0.1:49554 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:45478 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2665 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46169 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:49584 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40599 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42623 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (625843813) connection to localhost/127.0.0.1:40599 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=802 (was 807), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=696 (was 737), ProcessCount=22 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=1265 (was 1496) 2024-11-21T04:23:37,530 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-11-21T04:23:37,552 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=791, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=696, ProcessCount=19, AvailableMemoryMB=1272 2024-11-21T04:23:37,552 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-11-21T04:23:37,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:23:37,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-21T04:23:37,559 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:23:37,559 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:23:37,560 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 76 2024-11-21T04:23:37,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-21T04:23:37,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-21T04:23:37,561 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:23:37,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742003_1179 (size=404) 2024-11-21T04:23:37,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742003_1179 (size=404) 2024-11-21T04:23:37,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742003_1179 (size=404) 2024-11-21T04:23:37,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-21T04:23:37,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-21T04:23:37,976 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6f37cd7d66b243795426efcca5f580ca, NAME => 'testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:37,976 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a837349f5bb27d71a1248c553065d0cf, NAME => 'testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:37,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33643 is added to blk_1073742004_1180 (size=65) 2024-11-21T04:23:37,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742004_1180 (size=65) 2024-11-21T04:23:37,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742005_1181 (size=65) 2024-11-21T04:23:37,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742004_1180 (size=65) 2024-11-21T04:23:37,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742005_1181 (size=65) 2024-11-21T04:23:37,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742005_1181 (size=65) 2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 6f37cd7d66b243795426efcca5f580ca, disabling compactions & flushes 2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:23:37,986 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing a837349f5bb27d71a1248c553065d0cf, disabling compactions & flushes 2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. after waiting 0 ms 2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:23:37,986 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 2024-11-21T04:23:37,986 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 
2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. after waiting 0 ms 2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6f37cd7d66b243795426efcca5f580ca: Waiting for close lock at 1732163017986Disabling compacts and flushes for region at 1732163017986Disabling writes for close at 1732163017986Writing region close event to WAL at 1732163017986Closed at 1732163017986 2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 2024-11-21T04:23:37,986 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 2024-11-21T04:23:37,986 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for a837349f5bb27d71a1248c553065d0cf: Waiting for close lock at 1732163017986Disabling compacts and flushes for region at 1732163017986Disabling writes for close at 1732163017986Writing region close event to WAL at 1732163017986Closed at 1732163017986 2024-11-21T04:23:37,988 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:23:37,988 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732163017988"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163017988"}]},"ts":"1732163017988"} 2024-11-21T04:23:37,988 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732163017988"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163017988"}]},"ts":"1732163017988"} 2024-11-21T04:23:37,991 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
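[editor's note] The entries above trace CreateTableProcedure pid=76 for 'testtb-testConsecutiveExports': a single 'cf' family with VERSIONS => '1', two regions split at row key '1', and the meta rows just written. The client call that triggered it is not shown in the log; the following is only a minimal sketch of an equivalent request through the public HBase Admin API (configuration source and variable names are assumptions, not the test's actual code).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // One 'cf' family keeping a single version, matching the descriptor printed by HMaster above.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // A single split key ('1') yields the two regions ['', '1') and ['1', '') seen in the log.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splitKeys);          // returns once the create-table procedure finishes
    }
  }
}
```

The blocking createTable call corresponds to the repeated "Checking to see if procedure is done pid=76" polling that continues in the entries below.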
2024-11-21T04:23:37,992 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:23:37,992 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163017992"}]},"ts":"1732163017992"} 2024-11-21T04:23:37,993 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-21T04:23:37,994 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:23:37,995 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:23:37,995 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:23:37,995 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:23:37,995 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:23:37,995 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:23:37,995 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:23:37,995 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:23:37,995 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:23:37,995 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:23:37,995 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:23:37,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6f37cd7d66b243795426efcca5f580ca, ASSIGN}, {pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a837349f5bb27d71a1248c553065d0cf, ASSIGN}] 2024-11-21T04:23:37,997 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6f37cd7d66b243795426efcca5f580ca, ASSIGN 2024-11-21T04:23:37,997 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a837349f5bb27d71a1248c553065d0cf, ASSIGN 2024-11-21T04:23:37,998 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6f37cd7d66b243795426efcca5f580ca, ASSIGN; state=OFFLINE, location=fdc0a393186c,42393,1732162938268; forceNewPlan=false, retain=false 2024-11-21T04:23:37,998 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=78, ppid=76, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a837349f5bb27d71a1248c553065d0cf, ASSIGN; state=OFFLINE, location=fdc0a393186c,34363,1732162938185; forceNewPlan=false, retain=false 2024-11-21T04:23:38,148 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-21T04:23:38,148 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=6f37cd7d66b243795426efcca5f580ca, regionState=OPENING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:23:38,148 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=a837349f5bb27d71a1248c553065d0cf, regionState=OPENING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:23:38,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a837349f5bb27d71a1248c553065d0cf, ASSIGN because future has completed 2024-11-21T04:23:38,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=79, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure a837349f5bb27d71a1248c553065d0cf, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:23:38,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6f37cd7d66b243795426efcca5f580ca, ASSIGN because future has completed 2024-11-21T04:23:38,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6f37cd7d66b243795426efcca5f580ca, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:23:38,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-21T04:23:38,307 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 2024-11-21T04:23:38,307 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7752): Opening region: {ENCODED => a837349f5bb27d71a1248c553065d0cf, NAME => 'testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T04:23:38,307 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. service=AccessControlService 2024-11-21T04:23:38,308 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T04:23:38,308 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,308 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:23:38,308 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7794): checking encryption for a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,308 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:23:38,308 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7797): checking classloading for a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,308 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7752): Opening region: {ENCODED => 6f37cd7d66b243795426efcca5f580ca, NAME => 'testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T04:23:38,309 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. service=AccessControlService 2024-11-21T04:23:38,309 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T04:23:38,309 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,310 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:23:38,310 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7794): checking encryption for 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,310 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7797): checking classloading for 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,310 INFO [StoreOpener-a837349f5bb27d71a1248c553065d0cf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,315 INFO [StoreOpener-a837349f5bb27d71a1248c553065d0cf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a837349f5bb27d71a1248c553065d0cf columnFamilyName cf 2024-11-21T04:23:38,315 INFO [StoreOpener-6f37cd7d66b243795426efcca5f580ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,315 DEBUG [StoreOpener-a837349f5bb27d71a1248c553065d0cf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:23:38,315 INFO [StoreOpener-a837349f5bb27d71a1248c553065d0cf-1 {}] regionserver.HStore(327): Store=a837349f5bb27d71a1248c553065d0cf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:23:38,316 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1038): replaying wal for a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,317 INFO [StoreOpener-6f37cd7d66b243795426efcca5f580ca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6f37cd7d66b243795426efcca5f580ca columnFamilyName cf 2024-11-21T04:23:38,317 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,317 DEBUG [StoreOpener-6f37cd7d66b243795426efcca5f580ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:23:38,317 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,317 INFO [StoreOpener-6f37cd7d66b243795426efcca5f580ca-1 {}] regionserver.HStore(327): Store=6f37cd7d66b243795426efcca5f580ca/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:23:38,318 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1038): replaying wal for 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,318 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1048): stopping wal replay for a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,318 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1060): Cleaning up temporary data for a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,319 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,319 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,319 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1048): stopping wal replay for 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,319 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1060): Cleaning up temporary data for 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,320 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] 
regionserver.HRegion(1093): writing seq id for a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,322 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1093): writing seq id for 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,322 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:23:38,323 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1114): Opened a837349f5bb27d71a1248c553065d0cf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59420250, jitterRate=-0.1145692765712738}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:23:38,323 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,324 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1006): Region open journal for a837349f5bb27d71a1248c553065d0cf: Running coprocessor pre-open hook at 1732163018308Writing region info on filesystem at 1732163018308Initializing all the Stores at 1732163018310 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163018310Cleaning up temporary data from old regions at 1732163018318 (+8 ms)Running coprocessor post-open hooks at 1732163018323 (+5 ms)Region opened successfully at 1732163018324 (+1 ms) 2024-11-21T04:23:38,325 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf., pid=79, masterSystemTime=1732163018303 2024-11-21T04:23:38,325 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:23:38,326 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1114): Opened 6f37cd7d66b243795426efcca5f580ca; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74941261, jitterRate=0.1167118102312088}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:23:38,326 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,326 DEBUG 
[RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1006): Region open journal for 6f37cd7d66b243795426efcca5f580ca: Running coprocessor pre-open hook at 1732163018310Writing region info on filesystem at 1732163018310Initializing all the Stores at 1732163018313 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163018313Cleaning up temporary data from old regions at 1732163018319 (+6 ms)Running coprocessor post-open hooks at 1732163018326 (+7 ms)Region opened successfully at 1732163018326 2024-11-21T04:23:38,327 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 2024-11-21T04:23:38,327 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 2024-11-21T04:23:38,327 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca., pid=80, masterSystemTime=1732163018304 2024-11-21T04:23:38,328 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=a837349f5bb27d71a1248c553065d0cf, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:23:38,329 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:23:38,329 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 
2024-11-21T04:23:38,330 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=6f37cd7d66b243795426efcca5f580ca, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:23:38,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=79, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure a837349f5bb27d71a1248c553065d0cf, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:23:38,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=80, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6f37cd7d66b243795426efcca5f580ca, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:23:38,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=79, resume processing ppid=78 2024-11-21T04:23:38,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, ppid=78, state=SUCCESS, hasLock=false; OpenRegionProcedure a837349f5bb27d71a1248c553065d0cf, server=fdc0a393186c,34363,1732162938185 in 179 msec 2024-11-21T04:23:38,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=77 2024-11-21T04:23:38,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a837349f5bb27d71a1248c553065d0cf, ASSIGN in 337 msec 2024-11-21T04:23:38,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=77, state=SUCCESS, hasLock=false; OpenRegionProcedure 6f37cd7d66b243795426efcca5f580ca, server=fdc0a393186c,42393,1732162938268 in 180 msec 2024-11-21T04:23:38,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-11-21T04:23:38,336 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6f37cd7d66b243795426efcca5f580ca, ASSIGN in 338 msec 2024-11-21T04:23:38,336 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:23:38,336 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163018336"}]},"ts":"1732163018336"} 2024-11-21T04:23:38,338 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-21T04:23:38,338 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:23:38,339 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-21T04:23:38,342 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-21T04:23:38,388 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:38,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:38,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:38,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:23:38,405 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:38,405 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:38,405 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:38,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-21T04:23:38,407 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 851 msec 2024-11-21T04:23:38,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-21T04:23:38,704 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-21T04:23:38,704 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-11-21T04:23:38,705 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:23:38,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-11-21T04:23:38,714 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:23:38,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 
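[editor's note] At this point the test has confirmed via HBaseTestingUtil that every region of the new table is assigned. Outside the test harness, a client can make a similar check through the Admin API; the sketch below is illustrative only, reusing the table name and the 60 s timeout from the log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForTableAssignment {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // isTableAvailable reports true only once all regions of the table are assigned and online.
      long deadline = System.currentTimeMillis() + 60_000;   // 60 s, mirroring the test's wait
      while (!admin.isTableAvailable(table)) {
        if (System.currentTimeMillis() > deadline) {
          throw new IllegalStateException("Regions of " + table + " not assigned within 60s");
        }
        Thread.sleep(200);
      }
    }
  }
}
```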
2024-11-21T04:23:38,714 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-21T04:23:38,718 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-21T04:23:38,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163018718 (current time:1732163018718). 2024-11-21T04:23:38,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:23:38,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-21T04:23:38,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:23:38,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@91704a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:38,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:23:38,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:23:38,720 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:23:38,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:23:38,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:23:38,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e8d2f87, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:38,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:23:38,721 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:23:38,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:38,722 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:42002, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:23:38,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dd819a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:38,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:23:38,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:23:38,724 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:38,725 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38542, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:38,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:23:38,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:23:38,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:38,727 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
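[editor's note] The snapshot request being validated above ({ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }) maps onto a single client call; the ACL lookups and short-lived connections in these entries are the master's own security checks, not client code. A minimal client-side sketch, assuming the standard HBase Admin API:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // For an enabled table this takes a FLUSH-type snapshot: each region flushes its memstore
      // before file references are captured. The table is still empty here, so the per-region
      // flush is a no-op (see the "Flush status journal" entries further down).
      admin.snapshot("emptySnaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}
```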
2024-11-21T04:23:38,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:38,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a23e1bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:38,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:23:38,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:23:38,728 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:23:38,728 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:23:38,728 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:23:38,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48902774, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:38,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:23:38,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:23:38,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:38,730 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42034, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:23:38,731 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b73dc0a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:38,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:23:38,732 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:23:38,733 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:38,733 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38554, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:38,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:23:38,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:38,736 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46730, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:38,738 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:23:38,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:23:38,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:38,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:38,738 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:23:38,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-21T04:23:38,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:23:38,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-21T04:23:38,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-21T04:23:38,741 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:23:38,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-21T04:23:38,743 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:23:38,747 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:23:38,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742006_1182 (size=161) 2024-11-21T04:23:38,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742006_1182 (size=161) 2024-11-21T04:23:38,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742006_1182 (size=161) 2024-11-21T04:23:38,760 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ 
ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:23:38,760 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6f37cd7d66b243795426efcca5f580ca}, {pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a837349f5bb27d71a1248c553065d0cf}] 2024-11-21T04:23:38,762 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,762 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-21T04:23:38,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=82 2024-11-21T04:23:38,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=83 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.HRegion(2603): Flush status journal for a837349f5bb27d71a1248c553065d0cf: 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.HRegion(2603): Flush status journal for 6f37cd7d66b243795426efcca5f580ca: 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. for emptySnaptb0-testConsecutiveExports completed. 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. for emptySnaptb0-testConsecutiveExports completed. 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:23:38,914 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:23:38,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742007_1183 (size=68) 2024-11-21T04:23:38,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742008_1184 (size=68) 2024-11-21T04:23:38,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742007_1183 (size=68) 2024-11-21T04:23:38,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742008_1184 (size=68) 2024-11-21T04:23:38,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:23:38,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-21T04:23:38,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=82 2024-11-21T04:23:38,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,928 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:38,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 
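[Editorial note] The entries above trace SnapshotProcedure pid=81 (snapshot emptySnaptb0-testConsecutiveExports, type=FLUSH) fanning out SnapshotRegionCallable work to both regions of testtb-testConsecutiveExports. For orientation only, a minimal client-side sketch that would start this same procedure, assuming a reachable cluster with hbase-site.xml on the classpath (class name is illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptyFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml for the target cluster is on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot, matching "type=FLUSH" in the procedure description logged above.
      admin.snapshot("emptySnaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"),
          SnapshotType.FLUSH);
    }
  }
}
```

The call blocks until the master-side procedure (SNAPSHOT_PREPARE through SNAPSHOT_POST_OPERATION, as logged) completes or fails.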
2024-11-21T04:23:38,929 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=83 2024-11-21T04:23:38,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742007_1183 (size=68) 2024-11-21T04:23:38,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742008_1184 (size=68) 2024-11-21T04:23:38,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=83 2024-11-21T04:23:38,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,930 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:38,930 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6f37cd7d66b243795426efcca5f580ca in 169 msec 2024-11-21T04:23:38,932 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=81 2024-11-21T04:23:38,932 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a837349f5bb27d71a1248c553065d0cf in 170 msec 2024-11-21T04:23:38,932 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:23:38,932 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:23:38,933 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:23:38,933 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-21T04:23:38,934 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-21T04:23:38,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742009_1185 (size=543) 2024-11-21T04:23:38,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742009_1185 (size=543) 2024-11-21T04:23:38,942 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742009_1185 (size=543) 2024-11-21T04:23:38,948 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:23:38,953 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:23:38,953 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-21T04:23:38,954 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:23:38,954 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-21T04:23:38,956 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 215 msec 2024-11-21T04:23:39,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-21T04:23:39,063 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-21T04:23:39,069 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='04acb7e72fa21620077f661eac2f69510', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:23:39,070 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='1e82f5057b3d741228a856ec1389b6539', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:23:39,071 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='2e835954b73b4321f8a10e302b6893c72', locateType=CURRENT is 
[region=testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:23:39,072 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='3c82d5204a6d13893ca8bf3ea6d6b288f', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:23:39,073 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='402b3c25510e1c1dd7c78bb49de15b30e', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:23:39,074 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='51ce50941cb6e272228bfe7c8c28b938b', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:23:39,078 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42393 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:23:39,080 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:23:39,082 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-21T04:23:39,087 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-21T04:23:39,087 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 
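[Editorial note] The HRegion(8528) warnings above come from test writes issued with the WAL turned off for the mutation. A hedged sketch of such a write through the public client API (row and value are made up; the cf/q column family and qualifier follow the keys that appear later in the flush output):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
      Put put = new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skipping the WAL is what triggers the "Data may be lost in the event of a crash" warning above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```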
2024-11-21T04:23:39,087 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:23:39,089 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-21T04:23:39,097 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-21T04:23:39,104 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-21T04:23:39,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-21T04:23:39,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163019108 (current time:1732163019108). 2024-11-21T04:23:39,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:23:39,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-21T04:23:39,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:23:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a9ac6a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:23:39,109 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-21T04:23:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:23:39,110 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:23:39,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:23:39,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:23:39,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f16bf77, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:39,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:23:39,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:23:39,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:39,112 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42056, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:23:39,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30cf140f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:39,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:23:39,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:23:39,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:39,116 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38568, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:39,118 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
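[Editorial note] The surrounding entries show the master building a short-lived client connection: fetching cluster_id 572b726e-d928-426e-9114-1cd37decf96c from the connection registry, locating hbase:meta, then closing the connection again. Purely as a reference point, a small sketch (assuming the default client configuration) that surfaces the same cluster id through the public Admin API:

```java
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ShowClusterId {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      // The same identifier ClusterIdFetcher logs as "Got connection registry info: cluster_id: ..."
      System.out.println("cluster id: " + metrics.getClusterId());
    }
  }
}
```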
2024-11-21T04:23:39,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:23:39,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:39,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:39,118 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:23:39,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53f1ded8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:39,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:23:39,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:23:39,122 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:23:39,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:23:39,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:23:39,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@745a4dd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:39,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:23:39,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:23:39,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:39,125 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42070, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:23:39,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39c6bcd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:23:39,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:23:39,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:23:39,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:39,129 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38582, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:39,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:23:39,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:23:39,133 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46746, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:23:39,134 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
2024-11-21T04:23:39,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:23:39,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:39,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:23:39,134 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:23:39,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-21T04:23:39,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
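[Editorial note] The PermissionStorage(613) entry above reads the ACL row for testtb-testConsecutiveExports from hbase:acl and finds jenkins with RWXCA, which writeAclToSnapshotDescription then records on the snapshot. A hedged sketch of fetching those grants through the public API, assuming the AccessController coprocessor is enabled as it is on this test cluster:

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAcls {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Reads hbase:acl, the same table PermissionStorage consults in the log above.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testConsecutiveExports");
      perms.forEach(System.out::println); // expect an entry along the lines of "jenkins: RWXCA"
    }
  }
}
```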
2024-11-21T04:23:39,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-21T04:23:39,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-21T04:23:39,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-21T04:23:39,137 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:23:39,140 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:23:39,144 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:23:39,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742010_1186 (size=156) 2024-11-21T04:23:39,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742010_1186 (size=156) 2024-11-21T04:23:39,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742010_1186 (size=156) 2024-11-21T04:23:39,164 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:23:39,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6f37cd7d66b243795426efcca5f580ca}, {pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a837349f5bb27d71a1248c553065d0cf}] 2024-11-21T04:23:39,165 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:39,166 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:39,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-21T04:23:39,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=86 2024-11-21T04:23:39,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 2024-11-21T04:23:39,318 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2902): Flushing a837349f5bb27d71a1248c553065d0cf 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-21T04:23:39,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=85 2024-11-21T04:23:39,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:23:39,319 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2902): Flushing 6f37cd7d66b243795426efcca5f580ca 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-21T04:23:39,337 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/.tmp/cf/d4ee0b46a8404d12956155bd59ecabac is 71, key is 00753e7ec087021bf18835bf26ace8c5/cf:q/1732163019077/Put/seqid=0 2024-11-21T04:23:39,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742011_1187 (size=5490) 2024-11-21T04:23:39,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742011_1187 (size=5490) 2024-11-21T04:23:39,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742011_1187 (size=5490) 2024-11-21T04:23:39,343 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/.tmp/cf/d4ee0b46a8404d12956155bd59ecabac 2024-11-21T04:23:39,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/.tmp/cf/b0456153f2f5427f9fc5d0a5cee04a4c is 71, key is 10ebe6371ddc0a37ad7ac6891306b99f/cf:q/1732163019080/Put/seqid=0 2024-11-21T04:23:39,349 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/.tmp/cf/d4ee0b46a8404d12956155bd59ecabac as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/cf/d4ee0b46a8404d12956155bd59ecabac 2024-11-21T04:23:39,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742012_1188 (size=8120) 2024-11-21T04:23:39,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742012_1188 (size=8120) 2024-11-21T04:23:39,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742012_1188 (size=8120) 2024-11-21T04:23:39,353 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/.tmp/cf/b0456153f2f5427f9fc5d0a5cee04a4c 2024-11-21T04:23:39,356 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/cf/d4ee0b46a8404d12956155bd59ecabac, entries=6, sequenceid=6, filesize=5.4 K 2024-11-21T04:23:39,357 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 6f37cd7d66b243795426efcca5f580ca in 38ms, sequenceid=6, compaction requested=false 2024-11-21T04:23:39,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2603): Flush status journal for 6f37cd7d66b243795426efcca5f580ca: 2024-11-21T04:23:39,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. for snaptb0-testConsecutiveExports completed. 2024-11-21T04:23:39,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-21T04:23:39,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:23:39,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/cf/d4ee0b46a8404d12956155bd59ecabac] hfiles 2024-11-21T04:23:39,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/cf/d4ee0b46a8404d12956155bd59ecabac for snapshot=snaptb0-testConsecutiveExports 2024-11-21T04:23:39,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/.tmp/cf/b0456153f2f5427f9fc5d0a5cee04a4c as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/cf/b0456153f2f5427f9fc5d0a5cee04a4c 2024-11-21T04:23:39,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742013_1189 (size=107) 2024-11-21T04:23:39,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742013_1189 (size=107) 2024-11-21T04:23:39,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742013_1189 (size=107) 2024-11-21T04:23:39,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 
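[Editorial note] The flush recorded above (DefaultStoreFlusher, HStore, "Finished flush of dataSize ~400 B") is driven by the FLUSH-type snapshot itself before it adds the new hfile d4ee0b46a8404d12956155bd59ecabac to the manifest. For comparison, an explicit client-initiated flush of the same table would be (sketch only, standard client configuration assumed):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flushes every region of the table; the snapshot procedure above performs the
      // equivalent per-region flush before referencing the resulting hfiles.
      admin.flush(TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}
```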
2024-11-21T04:23:39,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=85 2024-11-21T04:23:39,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=85 2024-11-21T04:23:39,366 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:39,366 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:23:39,366 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/cf/b0456153f2f5427f9fc5d0a5cee04a4c, entries=44, sequenceid=6, filesize=7.9 K 2024-11-21T04:23:39,367 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for a837349f5bb27d71a1248c553065d0cf in 49ms, sequenceid=6, compaction requested=false 2024-11-21T04:23:39,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2603): Flush status journal for a837349f5bb27d71a1248c553065d0cf: 2024-11-21T04:23:39,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. for snaptb0-testConsecutiveExports completed. 2024-11-21T04:23:39,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-21T04:23:39,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:23:39,368 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/cf/b0456153f2f5427f9fc5d0a5cee04a4c] hfiles 2024-11-21T04:23:39,368 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/cf/b0456153f2f5427f9fc5d0a5cee04a4c for snapshot=snaptb0-testConsecutiveExports 2024-11-21T04:23:39,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6f37cd7d66b243795426efcca5f580ca in 203 msec 2024-11-21T04:23:39,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742014_1190 (size=107) 2024-11-21T04:23:39,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742014_1190 (size=107) 2024-11-21T04:23:39,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742014_1190 (size=107) 2024-11-21T04:23:39,374 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 
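[Editorial note] At this point both SnapshotRegionProcedure children of pid=84 have reported back and the manifest references both hfiles. Once the parent procedure reaches SNAPSHOT_COMPLETE_SNAPSHOT (below), the snapshot becomes visible to clients; a hedged sketch of listing it:

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      // On this test cluster the list would now include both
      // emptySnaptb0-testConsecutiveExports and snaptb0-testConsecutiveExports.
      for (SnapshotDescription sd : snapshots) {
        System.out.println(sd.getName());
      }
    }
  }
}
```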
2024-11-21T04:23:39,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-21T04:23:39,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=86 2024-11-21T04:23:39,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:39,375 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:23:39,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-11-21T04:23:39,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a837349f5bb27d71a1248c553065d0cf in 212 msec 2024-11-21T04:23:39,378 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:23:39,379 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:23:39,380 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:23:39,380 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-21T04:23:39,381 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-21T04:23:39,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742015_1191 (size=621) 2024-11-21T04:23:39,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742015_1191 (size=621) 2024-11-21T04:23:39,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742015_1191 (size=621) 2024-11-21T04:23:39,394 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:23:39,400 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): 
pid=84, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:23:39,400 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-21T04:23:39,401 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:23:39,402 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-21T04:23:39,403 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 267 msec 2024-11-21T04:23:39,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-21T04:23:39,452 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-21T04:23:39,453 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452 2024-11-21T04:23:39,453 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452, srcFsUri=hdfs://localhost:44067, srcDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:39,490 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44067, inputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:39,490 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@fac06ab, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 
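[Editorial note] The TestExportSnapshot entries above kick off a local export of snaptb0-testConsecutiveExports through the ExportSnapshot tool (inputFs on HDFS, outputFs on the local filesystem). A sketch of invoking the same tool programmatically; the destination path is illustrative, the option spellings follow the ExportSnapshot usage text in recent HBase releases and should be checked against --help for your version, and a MapReduce runtime (or the local job runner) is assumed:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotLocally {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest plus the referenced hfiles to the target root
    // (here a local file:// path, as in the test's "Local export destination path").
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testConsecutiveExports",
        "--copy-to", "file:///tmp/local-export"
    });
    System.exit(rc);
  }
}
```

The exported tree under the target root mirrors the .hbase-snapshot layout seen in the log, so the snapshot can later be cloned or restored on the destination cluster.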
2024-11-21T04:23:39,492 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T04:23:39,498 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-21T04:23:39,533 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:39,533 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:39,534 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:40,314 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0003/container_1732162945431_0003_01_000002/launch_container.sh] 2024-11-21T04:23:40,314 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0003/container_1732162945431_0003_01_000002/container_tokens] 2024-11-21T04:23:40,314 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0003/container_1732162945431_0003_01_000002/sysfs] 2024-11-21T04:23:40,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-11386637156341170889.jar 2024-11-21T04:23:40,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:40,498 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:40,564 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-7361534515583584401.jar 2024-11-21T04:23:40,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:40,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:40,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:40,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:40,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:40,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:40,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T04:23:40,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T04:23:40,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T04:23:40,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T04:23:40,570 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T04:23:40,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T04:23:40,571 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T04:23:40,572 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T04:23:40,572 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T04:23:40,572 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T04:23:40,573 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T04:23:40,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:40,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:40,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:40,575 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:40,575 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:40,576 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:40,576 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:40,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742016_1192 (size=131440) 2024-11-21T04:23:40,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742016_1192 (size=131440) 2024-11-21T04:23:40,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742016_1192 (size=131440) 2024-11-21T04:23:40,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742017_1193 (size=4188619) 2024-11-21T04:23:40,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742017_1193 (size=4188619) 2024-11-21T04:23:40,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742017_1193 (size=4188619) 2024-11-21T04:23:40,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742018_1194 (size=1323991) 2024-11-21T04:23:40,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742018_1194 (size=1323991) 2024-11-21T04:23:40,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742018_1194 (size=1323991) 2024-11-21T04:23:40,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742019_1195 (size=903734) 2024-11-21T04:23:40,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742019_1195 (size=903734) 2024-11-21T04:23:40,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742019_1195 (size=903734) 2024-11-21T04:23:40,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742020_1196 (size=8360083) 2024-11-21T04:23:40,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742020_1196 (size=8360083) 2024-11-21T04:23:40,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742020_1196 (size=8360083) 2024-11-21T04:23:40,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742021_1197 (size=1877034) 2024-11-21T04:23:40,736 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742021_1197 (size=1877034) 2024-11-21T04:23:40,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742021_1197 (size=1877034) 2024-11-21T04:23:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742022_1198 (size=77835) 2024-11-21T04:23:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742022_1198 (size=77835) 2024-11-21T04:23:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742022_1198 (size=77835) 2024-11-21T04:23:40,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742023_1199 (size=30949) 2024-11-21T04:23:40,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742023_1199 (size=30949) 2024-11-21T04:23:40,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742023_1199 (size=30949) 2024-11-21T04:23:40,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742024_1200 (size=1597327) 2024-11-21T04:23:40,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742024_1200 (size=1597327) 2024-11-21T04:23:40,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742024_1200 (size=1597327) 2024-11-21T04:23:40,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742025_1201 (size=4695811) 2024-11-21T04:23:40,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742025_1201 (size=4695811) 2024-11-21T04:23:40,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742025_1201 (size=4695811) 2024-11-21T04:23:40,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742026_1202 (size=232957) 2024-11-21T04:23:40,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742026_1202 (size=232957) 2024-11-21T04:23:40,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742026_1202 (size=232957) 2024-11-21T04:23:40,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742027_1203 (size=127628) 2024-11-21T04:23:40,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742027_1203 (size=127628) 2024-11-21T04:23:40,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742027_1203 (size=127628) 2024-11-21T04:23:41,204 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742028_1204 (size=20406) 2024-11-21T04:23:41,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742028_1204 (size=20406) 2024-11-21T04:23:41,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742028_1204 (size=20406) 2024-11-21T04:23:41,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742029_1205 (size=6424740) 2024-11-21T04:23:41,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742029_1205 (size=6424740) 2024-11-21T04:23:41,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742029_1205 (size=6424740) 2024-11-21T04:23:41,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742030_1206 (size=5175431) 2024-11-21T04:23:41,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742030_1206 (size=5175431) 2024-11-21T04:23:41,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742030_1206 (size=5175431) 2024-11-21T04:23:41,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742031_1207 (size=217634) 2024-11-21T04:23:41,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742031_1207 (size=217634) 2024-11-21T04:23:41,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742031_1207 (size=217634) 2024-11-21T04:23:41,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742032_1208 (size=1832290) 2024-11-21T04:23:41,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742032_1208 (size=1832290) 2024-11-21T04:23:41,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742032_1208 (size=1832290) 2024-11-21T04:23:41,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742033_1209 (size=322274) 2024-11-21T04:23:41,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742033_1209 (size=322274) 2024-11-21T04:23:41,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742033_1209 (size=322274) 2024-11-21T04:23:41,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742034_1210 (size=440656) 2024-11-21T04:23:41,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742034_1210 (size=440656) 2024-11-21T04:23:41,272 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742034_1210 (size=440656) 2024-11-21T04:23:41,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742035_1211 (size=503880) 2024-11-21T04:23:41,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742035_1211 (size=503880) 2024-11-21T04:23:41,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742035_1211 (size=503880) 2024-11-21T04:23:41,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742036_1212 (size=29229) 2024-11-21T04:23:41,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742036_1212 (size=29229) 2024-11-21T04:23:41,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742036_1212 (size=29229) 2024-11-21T04:23:41,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742037_1213 (size=24096) 2024-11-21T04:23:41,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742037_1213 (size=24096) 2024-11-21T04:23:41,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742037_1213 (size=24096) 2024-11-21T04:23:41,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742038_1214 (size=111872) 2024-11-21T04:23:41,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742038_1214 (size=111872) 2024-11-21T04:23:41,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742038_1214 (size=111872) 2024-11-21T04:23:41,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742039_1215 (size=45609) 2024-11-21T04:23:41,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742039_1215 (size=45609) 2024-11-21T04:23:41,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742039_1215 (size=45609) 2024-11-21T04:23:41,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742040_1216 (size=136454) 2024-11-21T04:23:41,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742040_1216 (size=136454) 2024-11-21T04:23:41,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742040_1216 (size=136454) 2024-11-21T04:23:41,329 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
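For context (illustrative note, not part of the captured log): the long run of TableMapReduceUtil(972) "For class X, using jar Y" DEBUG entries above is the dependency-jar resolution HBase performs while setting up the export MapReduce job, and the JobResourceUploader warning only records that no explicit job jar was set. A minimal sketch of the call that produces this resolution is below; the job name is a placeholder and the surrounding configuration is assumed.

    // Sketch only: TableMapReduceUtil.addDependencyJars walks the job's key classes and adds
    // the jar containing each one to the job's distributed cache, which is what emits the
    // per-class "using jar ..." DEBUG lines seen above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");  // placeholder job name
        TableMapReduceUtil.addDependencyJars(job);
        // The resolved jar paths are accumulated in the "tmpjars" configuration property.
        System.out.println(job.getConfiguration().get("tmpjars"));
      }
    }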
2024-11-21T04:23:41,331 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list
2024-11-21T04:23:41,333 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K
2024-11-21T04:23:41,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742041_1217 (size=338)
2024-11-21T04:23:41,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742041_1217 (size=338)
2024-11-21T04:23:41,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742041_1217 (size=338)
2024-11-21T04:23:41,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742042_1218 (size=15)
2024-11-21T04:23:41,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742042_1218 (size=15)
2024-11-21T04:23:41,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742042_1218 (size=15)
2024-11-21T04:23:41,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742043_1219 (size=303779)
2024-11-21T04:23:41,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742043_1219 (size=303779)
2024-11-21T04:23:41,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742043_1219 (size=303779)
2024-11-21T04:23:41,445 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-11-21T04:23:41,445 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low.

skipping enforcement to allow at least one application to start 2024-11-21T04:23:41,448 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0003_000001 (auth:SIMPLE) from 127.0.0.1:52646 2024-11-21T04:23:41,463 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_3/usercache/jenkins/appcache/application_1732162945431_0003/container_1732162945431_0003_01_000001/launch_container.sh] 2024-11-21T04:23:41,463 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_3/usercache/jenkins/appcache/application_1732162945431_0003/container_1732162945431_0003_01_000001/container_tokens] 2024-11-21T04:23:41,463 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_3/usercache/jenkins/appcache/application_1732162945431_0003/container_1732162945431_0003_01_000001/sysfs] 2024-11-21T04:23:42,148 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0004_000001 (auth:SIMPLE) from 127.0.0.1:45028 2024-11-21T04:23:42,746 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:23:46,349 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T04:23:47,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-21T04:23:47,560 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-21T04:23:48,702 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0004_000001 (auth:SIMPLE) from 127.0.0.1:47398 2024-11-21T04:23:49,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742044_1220 (size=349429) 2024-11-21T04:23:49,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742044_1220 (size=349429) 2024-11-21T04:23:49,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742044_1220 (size=349429) 2024-11-21T04:23:50,952 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0004_000001 (auth:SIMPLE) from 127.0.0.1:45042 2024-11-21T04:23:53,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742045_1221 (size=17447) 2024-11-21T04:23:53,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742045_1221 (size=17447) 2024-11-21T04:23:53,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742045_1221 (size=17447) 2024-11-21T04:23:53,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742046_1222 (size=462) 2024-11-21T04:23:53,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742046_1222 (size=462) 2024-11-21T04:23:53,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742046_1222 (size=462) 2024-11-21T04:23:53,912 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0004/container_1732162945431_0004_01_000002/launch_container.sh] 2024-11-21T04:23:53,912 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0004/container_1732162945431_0004_01_000002/container_tokens] 2024-11-21T04:23:53,912 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0004/container_1732162945431_0004_01_000002/sysfs] 2024-11-21T04:23:53,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742047_1223 (size=17447) 2024-11-21T04:23:53,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742047_1223 (size=17447) 2024-11-21T04:23:53,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742047_1223 (size=17447) 2024-11-21T04:23:53,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742048_1224 (size=349429) 2024-11-21T04:23:53,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742048_1224 (size=349429) 2024-11-21T04:23:53,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742048_1224 (size=349429) 2024-11-21T04:23:54,018 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0004_000001 (auth:SIMPLE) from 127.0.0.1:56946 2024-11-21T04:23:55,522 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T04:23:55,522 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
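For context (illustrative note, not part of the captured log): right after the "Verify the exported snapshot's expiration status and integrity" entry above, the log records "Export Completed" and the test lists the files it expects under both the source and the exported snapshot directories (the .snapshotinfo and data.manifest entries that follow). A minimal sketch of an equivalent recursive listing with the plain Hadoop FileSystem API is below; the root path is hypothetical, the real paths are the ones shown in the log.

    // Sketch only: recursively list an exported snapshot directory and print each file;
    // per the log that follows, the listing includes .snapshotinfo and data.manifest.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListExportedSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical export root; the test's actual path is the local-export-... directory in the log.
        Path root = new Path("file:///tmp/local-export/.hbase-snapshot/snaptb0-testConsecutiveExports");
        FileSystem fs = root.getFileSystem(conf);
        RemoteIterator<LocatedFileStatus> files = fs.listFiles(root, true);  // true = recursive
        while (files.hasNext()) {
          System.out.println(files.next().getPath());
        }
      }
    }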
2024-11-21T04:23:55,525 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-21T04:23:55,525 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T04:23:55,525 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T04:23:55,526 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-21T04:23:55,527 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-21T04:23:55,527 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-21T04:23:55,527 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@fac06ab in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-21T04:23:55,527 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-21T04:23:55,527 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-21T04:23:55,529 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452, srcFsUri=hdfs://localhost:44067, srcDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:55,563 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44067, inputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:23:55,563 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@fac06ab, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-21T04:23:55,566 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T04:23:55,572 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-21T04:23:55,584 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:55,585 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:55,585 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:56,646 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-12464613697883076917.jar 2024-11-21T04:23:56,646 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:56,647 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:56,721 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-1830006599758575635.jar 2024-11-21T04:23:56,721 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:56,721 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:56,722 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:56,722 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:56,722 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:56,723 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:23:56,723 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T04:23:56,723 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T04:23:56,723 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T04:23:56,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T04:23:56,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T04:23:56,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T04:23:56,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T04:23:56,725 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T04:23:56,725 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T04:23:56,725 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T04:23:56,726 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T04:23:56,726 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:56,727 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:56,727 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:56,727 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:56,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:23:56,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:56,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:23:56,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742049_1225 (size=131440) 2024-11-21T04:23:56,802 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742049_1225 (size=131440) 2024-11-21T04:23:56,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742049_1225 (size=131440) 2024-11-21T04:23:56,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742050_1226 (size=4188619) 2024-11-21T04:23:56,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742050_1226 (size=4188619) 2024-11-21T04:23:56,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742050_1226 (size=4188619) 2024-11-21T04:23:56,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742051_1227 (size=1323991) 2024-11-21T04:23:56,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742051_1227 (size=1323991) 2024-11-21T04:23:56,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742051_1227 (size=1323991) 2024-11-21T04:23:56,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742052_1228 (size=440656) 2024-11-21T04:23:56,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742052_1228 (size=440656) 2024-11-21T04:23:56,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742052_1228 (size=440656) 2024-11-21T04:23:56,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742053_1229 (size=903734) 2024-11-21T04:23:56,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742053_1229 (size=903734) 2024-11-21T04:23:56,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742053_1229 (size=903734) 2024-11-21T04:23:56,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742054_1230 (size=8360083) 2024-11-21T04:23:56,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742054_1230 (size=8360083) 2024-11-21T04:23:56,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742054_1230 (size=8360083) 2024-11-21T04:23:56,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742055_1231 (size=1877034) 2024-11-21T04:23:56,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742055_1231 (size=1877034) 2024-11-21T04:23:56,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742055_1231 (size=1877034) 2024-11-21T04:23:56,934 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742056_1232 (size=77835) 2024-11-21T04:23:56,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742056_1232 (size=77835) 2024-11-21T04:23:56,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742056_1232 (size=77835) 2024-11-21T04:23:56,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742057_1233 (size=30949) 2024-11-21T04:23:56,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742057_1233 (size=30949) 2024-11-21T04:23:56,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742057_1233 (size=30949) 2024-11-21T04:23:56,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742058_1234 (size=1597327) 2024-11-21T04:23:56,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742058_1234 (size=1597327) 2024-11-21T04:23:56,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742058_1234 (size=1597327) 2024-11-21T04:23:56,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742059_1235 (size=4695811) 2024-11-21T04:23:56,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742059_1235 (size=4695811) 2024-11-21T04:23:56,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742059_1235 (size=4695811) 2024-11-21T04:23:56,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742060_1236 (size=232957) 2024-11-21T04:23:56,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742060_1236 (size=232957) 2024-11-21T04:23:56,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742060_1236 (size=232957) 2024-11-21T04:23:56,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742061_1237 (size=127628) 2024-11-21T04:23:56,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742061_1237 (size=127628) 2024-11-21T04:23:56,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742061_1237 (size=127628) 2024-11-21T04:23:56,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742062_1238 (size=20406) 2024-11-21T04:23:56,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742062_1238 (size=20406) 2024-11-21T04:23:56,992 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742062_1238 (size=20406) 2024-11-21T04:23:57,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742063_1239 (size=5175431) 2024-11-21T04:23:57,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742063_1239 (size=5175431) 2024-11-21T04:23:57,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742063_1239 (size=5175431) 2024-11-21T04:23:57,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742064_1240 (size=217634) 2024-11-21T04:23:57,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742064_1240 (size=217634) 2024-11-21T04:23:57,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742064_1240 (size=217634) 2024-11-21T04:23:57,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742065_1241 (size=1832290) 2024-11-21T04:23:57,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742065_1241 (size=1832290) 2024-11-21T04:23:57,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742065_1241 (size=1832290) 2024-11-21T04:23:57,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742066_1242 (size=322274) 2024-11-21T04:23:57,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742066_1242 (size=322274) 2024-11-21T04:23:57,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742066_1242 (size=322274) 2024-11-21T04:23:57,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742067_1243 (size=503880) 2024-11-21T04:23:57,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742067_1243 (size=503880) 2024-11-21T04:23:57,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742067_1243 (size=503880) 2024-11-21T04:23:57,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742068_1244 (size=6424740) 2024-11-21T04:23:57,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742068_1244 (size=6424740) 2024-11-21T04:23:57,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742068_1244 (size=6424740) 2024-11-21T04:23:57,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742069_1245 (size=29229) 
2024-11-21T04:23:57,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742069_1245 (size=29229) 2024-11-21T04:23:57,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742069_1245 (size=29229) 2024-11-21T04:23:57,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742070_1246 (size=24096) 2024-11-21T04:23:57,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742070_1246 (size=24096) 2024-11-21T04:23:57,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742070_1246 (size=24096) 2024-11-21T04:23:57,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742071_1247 (size=111872) 2024-11-21T04:23:57,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742071_1247 (size=111872) 2024-11-21T04:23:57,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742071_1247 (size=111872) 2024-11-21T04:23:57,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742072_1248 (size=45609) 2024-11-21T04:23:57,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742072_1248 (size=45609) 2024-11-21T04:23:57,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742072_1248 (size=45609) 2024-11-21T04:23:57,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742073_1249 (size=136454) 2024-11-21T04:23:57,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742073_1249 (size=136454) 2024-11-21T04:23:57,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742073_1249 (size=136454) 2024-11-21T04:23:57,097 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-21T04:23:57,099 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-21T04:23:57,101 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T04:23:57,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742074_1250 (size=338) 2024-11-21T04:23:57,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742074_1250 (size=338) 2024-11-21T04:23:57,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742074_1250 (size=338) 2024-11-21T04:23:57,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742075_1251 (size=15) 2024-11-21T04:23:57,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742075_1251 (size=15) 2024-11-21T04:23:57,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742075_1251 (size=15) 2024-11-21T04:23:57,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742076_1252 (size=303779) 2024-11-21T04:23:57,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742076_1252 (size=303779) 2024-11-21T04:23:57,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742076_1252 (size=303779) 2024-11-21T04:24:00,101 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T04:24:00,101 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T04:24:00,104 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0004_000001 (auth:SIMPLE) from 127.0.0.1:56962 2024-11-21T04:24:00,114 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0004/container_1732162945431_0004_01_000001/launch_container.sh] 2024-11-21T04:24:00,114 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0004/container_1732162945431_0004_01_000001/container_tokens] 2024-11-21T04:24:00,114 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0004/container_1732162945431_0004_01_000001/sysfs] 2024-11-21T04:24:00,462 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0005_000001 (auth:SIMPLE) from 127.0.0.1:39190 2024-11-21T04:24:05,165 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0005_000001 (auth:SIMPLE) from 127.0.0.1:55256 2024-11-21T04:24:05,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742077_1253 (size=349429) 2024-11-21T04:24:05,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742077_1253 (size=349429) 2024-11-21T04:24:05,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742077_1253 (size=349429) 2024-11-21T04:24:07,477 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0005_000001 (auth:SIMPLE) from 127.0.0.1:53702 2024-11-21T04:24:10,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742078_1254 (size=16925) 2024-11-21T04:24:10,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742078_1254 (size=16925) 2024-11-21T04:24:10,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742078_1254 (size=16925) 2024-11-21T04:24:10,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742079_1255 (size=462) 2024-11-21T04:24:10,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742079_1255 (size=462) 2024-11-21T04:24:10,901 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742079_1255 (size=462) 2024-11-21T04:24:10,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742080_1256 (size=16925) 2024-11-21T04:24:10,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742080_1256 (size=16925) 2024-11-21T04:24:10,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742080_1256 (size=16925) 2024-11-21T04:24:10,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742081_1257 (size=349429) 2024-11-21T04:24:10,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742081_1257 (size=349429) 2024-11-21T04:24:10,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742081_1257 (size=349429) 2024-11-21T04:24:10,959 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0005/container_1732162945431_0005_01_000002/launch_container.sh] 2024-11-21T04:24:10,959 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0005/container_1732162945431_0005_01_000002/container_tokens] 2024-11-21T04:24:10,960 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0005/container_1732162945431_0005_01_000002/sysfs] 2024-11-21T04:24:10,967 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0005_000001 (auth:SIMPLE) from 127.0.0.1:53710 2024-11-21T04:24:12,475 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T04:24:12,476 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
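
The entries above show an export of 'snaptb0-testConsecutiveExports' being finalized and verified, followed by the test listing the exported files in both HDFS and the local target directory. Outside the test harness, the same operation is typically driven through the ExportSnapshot tool; a sketch, assuming the snapshot already exists and the destination URI is writable (the destination shown is a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to the documented CLI form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //       -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/local-export
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-copy-to", "file:///tmp/local-export"   // hypothetical destination
        });
        System.exit(rc);
      }
    }
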
2024-11-21T04:24:12,479 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-21T04:24:12,479 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T04:24:12,479 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T04:24:12,479 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-21T04:24:12,482 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-21T04:24:12,482 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-21T04:24:12,482 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@fac06ab in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-21T04:24:12,482 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-21T04:24:12,483 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163019452/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-21T04:24:12,504 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-11-21T04:24:12,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-21T04:24:12,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-21T04:24:12,509 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163052509"}]},"ts":"1732163052509"} 2024-11-21T04:24:12,512 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-21T04:24:12,512 INFO [PEWorker-5 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-21T04:24:12,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-21T04:24:12,516 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6f37cd7d66b243795426efcca5f580ca, UNASSIGN}, {pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a837349f5bb27d71a1248c553065d0cf, UNASSIGN}] 2024-11-21T04:24:12,517 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6f37cd7d66b243795426efcca5f580ca, UNASSIGN 2024-11-21T04:24:12,518 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a837349f5bb27d71a1248c553065d0cf, UNASSIGN 2024-11-21T04:24:12,519 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=6f37cd7d66b243795426efcca5f580ca, regionState=CLOSING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:12,519 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=a837349f5bb27d71a1248c553065d0cf, regionState=CLOSING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:12,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6f37cd7d66b243795426efcca5f580ca, UNASSIGN because future has completed 2024-11-21T04:24:12,523 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:24:12,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6f37cd7d66b243795426efcca5f580ca, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:24:12,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a837349f5bb27d71a1248c553065d0cf, UNASSIGN because future has completed 2024-11-21T04:24:12,524 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:24:12,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=92, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure a837349f5bb27d71a1248c553065d0cf, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:24:12,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking 
to see if procedure is done pid=87 2024-11-21T04:24:12,677 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(122): Close 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:24:12,677 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:24:12,677 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1722): Closing 6f37cd7d66b243795426efcca5f580ca, disabling compactions & flushes 2024-11-21T04:24:12,677 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:24:12,677 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:24:12,677 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. after waiting 0 ms 2024-11-21T04:24:12,677 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:24:12,678 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(122): Close a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:24:12,678 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:24:12,678 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1722): Closing a837349f5bb27d71a1248c553065d0cf, disabling compactions & flushes 2024-11-21T04:24:12,678 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 2024-11-21T04:24:12,678 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 2024-11-21T04:24:12,678 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. after waiting 0 ms 2024-11-21T04:24:12,678 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 
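
The pid=87 through pid=92 procedures above are the server side of a table disable: the master schedules a CloseTableRegionsProcedure with per-region TransitRegionStateProcedure and CloseRegionProcedure children, and the region servers close 6f37cd7d66b243795426efcca5f580ca and a837349f5bb27d71a1248c553065d0cf. The client side recorded as "Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports" is a single Admin call; a minimal sketch, assuming a reachable cluster configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testConsecutiveExports");
          // Blocks until the DisableTableProcedure (pid=87 above) completes;
          // the repeated "Checking to see if procedure is done" lines are that wait.
          admin.disableTable(table);
          System.out.println("disabled: " + admin.isTableDisabled(table));
        }
      }
    }
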
2024-11-21T04:24:12,700 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:24:12,701 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:24:12,701 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca. 2024-11-21T04:24:12,701 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1676): Region close journal for 6f37cd7d66b243795426efcca5f580ca: Waiting for close lock at 1732163052677Running coprocessor pre-close hooks at 1732163052677Disabling compacts and flushes for region at 1732163052677Disabling writes for close at 1732163052677Writing region close event to WAL at 1732163052678 (+1 ms)Running coprocessor post-close hooks at 1732163052701 (+23 ms)Closed at 1732163052701 2024-11-21T04:24:12,704 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(157): Closed 6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:24:12,704 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=6f37cd7d66b243795426efcca5f580ca, regionState=CLOSED 2024-11-21T04:24:12,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6f37cd7d66b243795426efcca5f580ca, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:24:12,708 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:24:12,709 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:24:12,709 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf. 
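
The "Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController" entries indicate that this secure-export test runs with the AccessController coprocessor attached to each region, which is also why PermissionStorage and ZKPermissionWatcher activity appears once the table is removed. A sketch of the configuration that enables it, using the standard coprocessor keys; the exact values are a typical secure-test setup and an assumption, not copied from this run's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControllerConfigExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Load the AccessController on master, region servers, and regions so that
        // ACLs stored in hbase:acl (and mirrored under /hbase/acl) are enforced.
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.setBoolean("hbase.security.authorization", true);
        System.out.println(conf.get("hbase.coprocessor.region.classes"));
      }
    }
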
2024-11-21T04:24:12,709 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1676): Region close journal for a837349f5bb27d71a1248c553065d0cf: Waiting for close lock at 1732163052678Running coprocessor pre-close hooks at 1732163052678Disabling compacts and flushes for region at 1732163052678Disabling writes for close at 1732163052678Writing region close event to WAL at 1732163052680 (+2 ms)Running coprocessor post-close hooks at 1732163052709 (+29 ms)Closed at 1732163052709 2024-11-21T04:24:12,712 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(157): Closed a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:24:12,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=89 2024-11-21T04:24:12,714 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=a837349f5bb27d71a1248c553065d0cf, regionState=CLOSED 2024-11-21T04:24:12,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=89, state=SUCCESS, hasLock=false; CloseRegionProcedure 6f37cd7d66b243795426efcca5f580ca, server=fdc0a393186c,42393,1732162938268 in 184 msec 2024-11-21T04:24:12,717 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6f37cd7d66b243795426efcca5f580ca, UNASSIGN in 198 msec 2024-11-21T04:24:12,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure a837349f5bb27d71a1248c553065d0cf, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:24:12,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-11-21T04:24:12,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; CloseRegionProcedure a837349f5bb27d71a1248c553065d0cf, server=fdc0a393186c,34363,1732162938185 in 197 msec 2024-11-21T04:24:12,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=88 2024-11-21T04:24:12,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a837349f5bb27d71a1248c553065d0cf, UNASSIGN in 207 msec 2024-11-21T04:24:12,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=88, resume processing ppid=87 2024-11-21T04:24:12,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, ppid=87, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 214 msec 2024-11-21T04:24:12,733 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163052733"}]},"ts":"1732163052733"} 2024-11-21T04:24:12,737 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-21T04:24:12,737 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-21T04:24:12,740 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 234 msec 2024-11-21T04:24:12,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-21T04:24:12,823 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-21T04:24:12,823 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-11-21T04:24:12,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T04:24:12,826 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T04:24:12,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-21T04:24:12,827 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T04:24:12,834 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:24:12,835 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-21T04:24:12,838 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/recovered.edits] 2024-11-21T04:24:12,839 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:24:12,842 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/recovered.edits] 2024-11-21T04:24:12,843 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/cf/d4ee0b46a8404d12956155bd59ecabac 
to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/cf/d4ee0b46a8404d12956155bd59ecabac 2024-11-21T04:24:12,854 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca/recovered.edits/9.seqid 2024-11-21T04:24:12,854 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/6f37cd7d66b243795426efcca5f580ca 2024-11-21T04:24:12,855 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/cf/b0456153f2f5427f9fc5d0a5cee04a4c to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/cf/b0456153f2f5427f9fc5d0a5cee04a4c 2024-11-21T04:24:12,862 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf/recovered.edits/9.seqid 2024-11-21T04:24:12,863 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testConsecutiveExports/a837349f5bb27d71a1248c553065d0cf 2024-11-21T04:24:12,863 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-21T04:24:12,867 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T04:24:12,874 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-21T04:24:12,878 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-11-21T04:24:12,881 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T04:24:12,881 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
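
The DeleteTableProcedure (pid=93) above archives each region's cf files and recovered.edits under the .../archive/... tree before removing the regions and the table descriptor from hbase:meta; the snapshot deletions that follow further down ("delete name: emptySnaptb0-testConsecutiveExports" and "snaptb0-testConsecutiveExports") are separate Admin requests. The client-side equivalent is a couple of Admin calls; a minimal sketch, assuming the table has already been disabled:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Drives the DeleteTableProcedure (pid=93 above): regions are archived,
          // then removed from hbase:meta and the table descriptor is dropped.
          admin.deleteTable(TableName.valueOf("testtb-testConsecutiveExports"));

          // Snapshot deletion is independent of the table's lifecycle.
          admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
          admin.deleteSnapshot("snaptb0-testConsecutiveExports");
        }
      }
    }
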
2024-11-21T04:24:12,882 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163052881"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:12,882 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163052881"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:12,885 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T04:24:12,886 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6f37cd7d66b243795426efcca5f580ca, NAME => 'testtb-testConsecutiveExports,,1732163017554.6f37cd7d66b243795426efcca5f580ca.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a837349f5bb27d71a1248c553065d0cf, NAME => 'testtb-testConsecutiveExports,1,1732163017554.a837349f5bb27d71a1248c553065d0cf.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T04:24:12,886 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-11-21T04:24:12,886 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732163052886"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:12,889 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-21T04:24:12,891 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-21T04:24:12,894 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 68 msec 2024-11-21T04:24:12,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T04:24:12,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T04:24:12,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T04:24:12,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T04:24:12,944 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-21T04:24:12,944 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testConsecutiveExports with data PBUF 2024-11-21T04:24:12,944 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-21T04:24:12,944 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-21T04:24:12,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T04:24:12,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:12,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T04:24:12,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:12,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T04:24:12,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:12,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-21T04:24:12,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:12,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-21T04:24:12,953 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-21T04:24:12,953 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-21T04:24:12,962 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-21T04:24:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-21T04:24:12,966 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-21T04:24:12,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-21T04:24:12,989 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=793 (was 791) Potentially hanging thread: Thread-3705 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:56372 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 125887) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46245 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:34508 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:55894 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (625843813) connection to localhost/127.0.0.1:46245 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_415535460_1 at /127.0.0.1:44220 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36543 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=802 (was 802), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=620 (was 696), ProcessCount=22 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=820 (was 1272) 2024-11-21T04:24:12,990 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-11-21T04:24:13,011 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=793, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=620, ProcessCount=22, AvailableMemoryMB=816 2024-11-21T04:24:13,012 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-11-21T04:24:13,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:24:13,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:13,022 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:24:13,022 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
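
The ResourceChecker summary above closes out testConsecutiveExports, and the next test opens with a create-table request whose descriptor (a single 'cf' family, REGION_REPLICATION '1', default encodings) is printed inline. Building an equivalent descriptor programmatically with the 2.x builder API looks roughly like this; the single split key mirrors the two regions ('' to '1' and '1' to '') created below, and the details are a sketch rather than the test's own setup code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
              .setRegionReplication(1)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)   // matches VERSIONS => '1' in the printed descriptor
                  .build())
              .build();
          // One explicit split point gives the two regions seen in the log:
          // STARTKEY '' -> '1' and STARTKEY '1' -> ''.
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
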
2024-11-21T04:24:13,023 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 94 2024-11-21T04:24:13,023 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:24:13,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-21T04:24:13,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742082_1258 (size=422) 2024-11-21T04:24:13,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742082_1258 (size=422) 2024-11-21T04:24:13,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742082_1258 (size=422) 2024-11-21T04:24:13,047 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 65c69c31a4fbeb4741a9756ecf63ce3d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:13,047 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 657ef1187faef80e8de0106b2a68582c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:13,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742084_1260 (size=83) 2024-11-21T04:24:13,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742084_1260 (size=83) 2024-11-21T04:24:13,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is 
added to blk_1073742084_1260 (size=83) 2024-11-21T04:24:13,064 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:13,064 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 65c69c31a4fbeb4741a9756ecf63ce3d, disabling compactions & flushes 2024-11-21T04:24:13,064 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 2024-11-21T04:24:13,064 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 2024-11-21T04:24:13,064 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. after waiting 0 ms 2024-11-21T04:24:13,064 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 2024-11-21T04:24:13,064 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 
2024-11-21T04:24:13,064 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 65c69c31a4fbeb4741a9756ecf63ce3d: Waiting for close lock at 1732163053064Disabling compacts and flushes for region at 1732163053064Disabling writes for close at 1732163053064Writing region close event to WAL at 1732163053064Closed at 1732163053064 2024-11-21T04:24:13,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742083_1259 (size=83) 2024-11-21T04:24:13,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742083_1259 (size=83) 2024-11-21T04:24:13,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742083_1259 (size=83) 2024-11-21T04:24:13,069 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:13,070 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 657ef1187faef80e8de0106b2a68582c, disabling compactions & flushes 2024-11-21T04:24:13,070 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 2024-11-21T04:24:13,070 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 2024-11-21T04:24:13,070 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. after waiting 0 ms 2024-11-21T04:24:13,070 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 2024-11-21T04:24:13,070 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 
2024-11-21T04:24:13,070 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 657ef1187faef80e8de0106b2a68582c: Waiting for close lock at 1732163053070Disabling compacts and flushes for region at 1732163053070Disabling writes for close at 1732163053070Writing region close event to WAL at 1732163053070Closed at 1732163053070 2024-11-21T04:24:13,071 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:24:13,072 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732163053071"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163053071"}]},"ts":"1732163053071"} 2024-11-21T04:24:13,072 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732163053071"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163053071"}]},"ts":"1732163053071"} 2024-11-21T04:24:13,074 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T04:24:13,075 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:24:13,076 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163053076"}]},"ts":"1732163053076"} 2024-11-21T04:24:13,078 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-21T04:24:13,078 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:24:13,079 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:24:13,080 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:24:13,080 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:24:13,080 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:24:13,080 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:24:13,080 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:24:13,080 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:24:13,080 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:24:13,080 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:24:13,080 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:24:13,080 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65c69c31a4fbeb4741a9756ecf63ce3d, ASSIGN}, {pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=657ef1187faef80e8de0106b2a68582c, ASSIGN}] 2024-11-21T04:24:13,081 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=657ef1187faef80e8de0106b2a68582c, ASSIGN 2024-11-21T04:24:13,082 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65c69c31a4fbeb4741a9756ecf63ce3d, ASSIGN 2024-11-21T04:24:13,082 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=657ef1187faef80e8de0106b2a68582c, ASSIGN; state=OFFLINE, location=fdc0a393186c,39347,1732162938035; forceNewPlan=false, retain=false 2024-11-21T04:24:13,082 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65c69c31a4fbeb4741a9756ecf63ce3d, ASSIGN; state=OFFLINE, location=fdc0a393186c,34363,1732162938185; forceNewPlan=false, retain=false 2024-11-21T04:24:13,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-21T04:24:13,233 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-21T04:24:13,233 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=657ef1187faef80e8de0106b2a68582c, regionState=OPENING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:24:13,233 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=65c69c31a4fbeb4741a9756ecf63ce3d, regionState=OPENING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:13,237 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65c69c31a4fbeb4741a9756ecf63ce3d, ASSIGN because future has completed 2024-11-21T04:24:13,237 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:24:13,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=657ef1187faef80e8de0106b2a68582c, ASSIGN because future has completed 2024-11-21T04:24:13,239 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=98, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 657ef1187faef80e8de0106b2a68582c, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:24:13,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-21T04:24:13,394 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 2024-11-21T04:24:13,394 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7752): Opening region: {ENCODED => 65c69c31a4fbeb4741a9756ecf63ce3d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T04:24:13,394 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. service=AccessControlService 2024-11-21T04:24:13,395 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T04:24:13,395 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,395 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:13,395 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7794): checking encryption for 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,395 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7797): checking classloading for 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,396 INFO [StoreOpener-65c69c31a4fbeb4741a9756ecf63ce3d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,397 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 2024-11-21T04:24:13,397 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7752): Opening region: {ENCODED => 657ef1187faef80e8de0106b2a68582c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T04:24:13,397 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. service=AccessControlService 2024-11-21T04:24:13,398 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
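Both region opens above register the AccessControlService coprocessor before the region is initialized, which is what makes this a secure-export test. A minimal sketch of the site configuration that loads the AccessController on the master and region servers follows; the property names match the standard HBase security setup, but setting them programmatically like this is only for illustration (the mini cluster in this test wires them up through its own test configuration).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureClusterConf {
  public static Configuration secured() {
    Configuration conf = HBaseConfiguration.create();
    // Turn on authorization and load the AccessController coprocessor everywhere,
    // which is why each region open above registers service=AccessControlService.
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}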
2024-11-21T04:24:13,398 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,398 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:13,398 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7794): checking encryption for 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,398 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7797): checking classloading for 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,398 INFO [StoreOpener-65c69c31a4fbeb4741a9756ecf63ce3d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 65c69c31a4fbeb4741a9756ecf63ce3d columnFamilyName cf 2024-11-21T04:24:13,398 DEBUG [StoreOpener-65c69c31a4fbeb4741a9756ecf63ce3d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:13,399 INFO [StoreOpener-65c69c31a4fbeb4741a9756ecf63ce3d-1 {}] regionserver.HStore(327): Store=65c69c31a4fbeb4741a9756ecf63ce3d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:24:13,399 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1038): replaying wal for 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,399 INFO [StoreOpener-657ef1187faef80e8de0106b2a68582c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,400 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,401 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,401 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1048): stopping wal replay for 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,401 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1060): Cleaning up temporary data for 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,403 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1093): writing seq id for 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,403 INFO [StoreOpener-657ef1187faef80e8de0106b2a68582c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 657ef1187faef80e8de0106b2a68582c columnFamilyName cf 2024-11-21T04:24:13,403 DEBUG [StoreOpener-657ef1187faef80e8de0106b2a68582c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:13,404 INFO [StoreOpener-657ef1187faef80e8de0106b2a68582c-1 {}] regionserver.HStore(327): Store=657ef1187faef80e8de0106b2a68582c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:24:13,404 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1038): replaying wal for 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,405 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,406 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,406 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:24:13,406 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1048): stopping wal 
replay for 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,406 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1060): Cleaning up temporary data for 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,407 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1114): Opened 65c69c31a4fbeb4741a9756ecf63ce3d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59667702, jitterRate=-0.11088195443153381}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:24:13,407 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,408 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1006): Region open journal for 65c69c31a4fbeb4741a9756ecf63ce3d: Running coprocessor pre-open hook at 1732163053395Writing region info on filesystem at 1732163053395Initializing all the Stores at 1732163053396 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163053396Cleaning up temporary data from old regions at 1732163053401 (+5 ms)Running coprocessor post-open hooks at 1732163053407 (+6 ms)Region opened successfully at 1732163053408 (+1 ms) 2024-11-21T04:24:13,408 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1093): writing seq id for 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,409 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d., pid=97, masterSystemTime=1732163053390 2024-11-21T04:24:13,411 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:24:13,411 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 2024-11-21T04:24:13,412 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 
2024-11-21T04:24:13,412 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1114): Opened 657ef1187faef80e8de0106b2a68582c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69343586, jitterRate=0.03329995274543762}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:24:13,412 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,412 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=65c69c31a4fbeb4741a9756ecf63ce3d, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:13,412 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1006): Region open journal for 657ef1187faef80e8de0106b2a68582c: Running coprocessor pre-open hook at 1732163053398Writing region info on filesystem at 1732163053398Initializing all the Stores at 1732163053399 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163053399Cleaning up temporary data from old regions at 1732163053406 (+7 ms)Running coprocessor post-open hooks at 1732163053412 (+6 ms)Region opened successfully at 1732163053412 2024-11-21T04:24:13,413 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c., pid=98, masterSystemTime=1732163053392 2024-11-21T04:24:13,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:24:13,415 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 2024-11-21T04:24:13,415 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 
2024-11-21T04:24:13,416 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=657ef1187faef80e8de0106b2a68582c, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:24:13,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure 657ef1187faef80e8de0106b2a68582c, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:24:13,418 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=95 2024-11-21T04:24:13,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=95, state=SUCCESS, hasLock=false; OpenRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d, server=fdc0a393186c,34363,1732162938185 in 179 msec 2024-11-21T04:24:13,420 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65c69c31a4fbeb4741a9756ecf63ce3d, ASSIGN in 339 msec 2024-11-21T04:24:13,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=96 2024-11-21T04:24:13,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; OpenRegionProcedure 657ef1187faef80e8de0106b2a68582c, server=fdc0a393186c,39347,1732162938035 in 181 msec 2024-11-21T04:24:13,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=96, resume processing ppid=94 2024-11-21T04:24:13,426 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=657ef1187faef80e8de0106b2a68582c, ASSIGN in 343 msec 2024-11-21T04:24:13,427 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:24:13,427 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163053427"}]},"ts":"1732163053427"} 2024-11-21T04:24:13,429 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-21T04:24:13,431 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:24:13,431 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-21T04:24:13,435 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-21T04:24:13,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:13,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:13,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:13,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:13,617 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:13,618 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:13,619 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:13,620 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:13,621 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 603 msec 2024-11-21T04:24:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-21T04:24:13,653 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-21T04:24:13,653 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-11-21T04:24:13,654 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:13,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-11-21T04:24:13,659 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:13,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 
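With pid=94 finished and all regions reported assigned, the table exists exactly as described in the create request logged earlier. For reference, a minimal sketch of issuing an equivalent request through the HBase Java client: it uses the table name, the single 'cf' family with VERSIONS => '1', and one split key '1', which yields the two regions ('' to '1' and '1' to '') seen above. Connection settings are assumed to come from hbase-site.xml, and every attribute not set here falls back to the defaults echoed in the logged descriptor.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMergeRegionTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // quorum etc. from hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setRegionReplication(1)                       // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                         // VERSIONS => '1' in the logged descriptor
              .build());
      // One split key produces the two regions ('' -> '1' and '1' -> '') seen in the log.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splitKeys);
    }
  }
}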
2024-11-21T04:24:13,659 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-21T04:24:13,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-21T04:24:13,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163053666 (current time:1732163053666). 2024-11-21T04:24:13,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:24:13,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-21T04:24:13,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:24:13,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cd1e45f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:13,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:13,671 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:13,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:13,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:13,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6073ec8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:13,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:13,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:13,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-21T04:24:13,675 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50562, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:13,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c96b865, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:13,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:13,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:13,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:13,679 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56172, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:13,680 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:24:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:13,681 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T04:24:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3feba2fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:13,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:13,684 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:13,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:13,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:13,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f0800b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:13,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:13,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:13,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:13,686 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50578, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:13,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4854165e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:13,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:13,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:13,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:13,689 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56174, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T04:24:13,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:13,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:13,693 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45634, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:13,694 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:24:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:13,695 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-21T04:24:13,695 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:24:13,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:24:13,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-21T04:24:13,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-21T04:24:13,698 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:24:13,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-21T04:24:13,699 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:24:13,702 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:24:13,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742085_1261 (size=215) 2024-11-21T04:24:13,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742085_1261 (size=215) 2024-11-21T04:24:13,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742085_1261 (size=215) 2024-11-21T04:24:13,725 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, 
snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:24:13,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d}, {pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 657ef1187faef80e8de0106b2a68582c}] 2024-11-21T04:24:13,726 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,726 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-21T04:24:13,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=100 2024-11-21T04:24:13,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=101 2024-11-21T04:24:13,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 2024-11-21T04:24:13,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 2024-11-21T04:24:13,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.HRegion(2603): Flush status journal for 65c69c31a4fbeb4741a9756ecf63ce3d: 2024-11-21T04:24:13,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.HRegion(2603): Flush status journal for 657ef1187faef80e8de0106b2a68582c: 2024-11-21T04:24:13,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-21T04:24:13,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 
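[Editor's note] The SnapshotProcedure above (pid=99, type=FLUSH) and its per-region subprocedures are the master-side half of a client snapshot request. A minimal client-side sketch of issuing such a request is shown below; the quorum setting is a placeholder and nothing here is taken from the test's own source, only the table and snapshot names visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder, not from this run
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure finishes; the caller's
      // polling shows up in the log as "Checking to see if procedure is done".
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}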
2024-11-21T04:24:13,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:13,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:13,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:13,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:13,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:24:13,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:24:13,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742086_1262 (size=86) 2024-11-21T04:24:13,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742086_1262 (size=86) 2024-11-21T04:24:13,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742086_1262 (size=86) 2024-11-21T04:24:13,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 
2024-11-21T04:24:13,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=100 2024-11-21T04:24:13,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=100 2024-11-21T04:24:13,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,894 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:13,897 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d in 170 msec 2024-11-21T04:24:13,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742087_1263 (size=86) 2024-11-21T04:24:13,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742087_1263 (size=86) 2024-11-21T04:24:13,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742087_1263 (size=86) 2024-11-21T04:24:13,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 
2024-11-21T04:24:13,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-21T04:24:13,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=101 2024-11-21T04:24:13,910 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,911 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:13,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=101, resume processing ppid=99 2024-11-21T04:24:13,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 657ef1187faef80e8de0106b2a68582c in 189 msec 2024-11-21T04:24:13,918 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:24:13,925 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:24:13,926 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:24:13,926 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:13,927 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:13,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742088_1264 (size=597) 2024-11-21T04:24:13,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742088_1264 (size=597) 2024-11-21T04:24:13,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742088_1264 (size=597) 2024-11-21T04:24:13,940 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:24:13,948 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:24:13,948 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:13,950 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:24:13,950 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-21T04:24:13,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 254 msec 2024-11-21T04:24:14,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-21T04:24:14,013 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-21T04:24:14,018 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='016ffffdb99af39fe95a5abbf1a804826', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:24:14,019 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='19c77f5e98681c7c25dc1ec96706c04dd', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c., hostname=fdc0a393186c,39347,1732162938035, seqNum=2] 2024-11-21T04:24:14,020 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='257e10eef2dfac47414caafd4d4ddb36b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c., 
hostname=fdc0a393186c,39347,1732162938035, seqNum=2] 2024-11-21T04:24:14,021 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='35a28edcda5b1f2226a3b3268af1653b9', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c., hostname=fdc0a393186c,39347,1732162938035, seqNum=2] 2024-11-21T04:24:14,026 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:24:14,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39347 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:24:14,031 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-21T04:24:14,035 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:14,035 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 2024-11-21T04:24:14,035 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:14,037 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-21T04:24:14,044 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-21T04:24:14,053 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-21T04:24:14,057 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-21T04:24:14,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163054057 (current time:1732163054057). 
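[Editor's note] The two HRegion(8528) warnings above ("writing data to region ... with WAL disabled") correspond to client writes issued with write-ahead logging skipped. A hedged sketch of such a write follows; the row key, qualifier and value are illustrative, not values from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
      Put put = new Put(Bytes.toBytes("row-0")); // illustrative row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL trades durability for speed: an edit written this way is lost
      // if the region server crashes before the next memstore flush, which is
      // exactly what the warning in the log points out.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}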
2024-11-21T04:24:14,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:24:14,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-21T04:24:14,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:24:14,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1353104c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:14,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:14,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:14,059 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:14,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:14,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:14,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f7a47b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:14,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:14,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:14,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:14,061 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50602, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:14,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b70f7cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:14,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:14,063 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:14,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:14,064 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:14,066 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:24:14,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:14,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:14,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:14,066 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
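[Editor's note] The ClusterIdFetcher and ConnectionUtils entries above show a fresh connection resolving the cluster id and the hbase:meta location before locating user regions, the same lookups AsyncNonMetaRegionLocator logs as "The fetched location of ...". A client-side equivalent, sketched under the assumption of a default Configuration, is a RegionLocator lookup:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
      // Resolves which region (and therefore which region server) holds a row.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("1"));
      System.out.println(loc.getRegion().getRegionNameAsString()
          + " on " + loc.getServerName());
    }
  }
}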
2024-11-21T04:24:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48192136, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:14,071 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:14,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:14,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:14,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13534cb1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:14,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:14,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:14,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:14,072 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50626, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:14,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76d311c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:14,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:14,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:14,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:14,076 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56190, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T04:24:14,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:14,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:14,080 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45642, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:14,081 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 2024-11-21T04:24:14,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:14,081 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:14,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:14,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-21T04:24:14,082 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:24:14,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:24:14,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-21T04:24:14,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-21T04:24:14,085 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:24:14,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-21T04:24:14,086 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:24:14,089 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:24:14,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742089_1265 (size=210) 2024-11-21T04:24:14,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742089_1265 (size=210) 2024-11-21T04:24:14,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742089_1265 (size=210) 2024-11-21T04:24:14,102 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:24:14,102 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d}, {pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 657ef1187faef80e8de0106b2a68582c}] 2024-11-21T04:24:14,103 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:14,103 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:14,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-21T04:24:14,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-21T04:24:14,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=104 2024-11-21T04:24:14,255 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 2024-11-21T04:24:14,255 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 
2024-11-21T04:24:14,255 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2902): Flushing 657ef1187faef80e8de0106b2a68582c 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-21T04:24:14,255 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2902): Flushing 65c69c31a4fbeb4741a9756ecf63ce3d 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-21T04:24:14,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/.tmp/cf/b93616a15fd6470f81963a10d04a3ae3 is 71, key is 05d8613fda4e7b502a6dbd427c318e46/cf:q/1732163054026/Put/seqid=0 2024-11-21T04:24:14,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/.tmp/cf/74a7e312d57a48e6a1ef8dee3b5ce3d4 is 71, key is 296892d5efb55b08dc4ff393a94243dc/cf:q/1732163054029/Put/seqid=0 2024-11-21T04:24:14,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742091_1267 (size=8326) 2024-11-21T04:24:14,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742090_1266 (size=5288) 2024-11-21T04:24:14,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742091_1267 (size=8326) 2024-11-21T04:24:14,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742090_1266 (size=5288) 2024-11-21T04:24:14,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742091_1267 (size=8326) 2024-11-21T04:24:14,283 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/.tmp/cf/74a7e312d57a48e6a1ef8dee3b5ce3d4 2024-11-21T04:24:14,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742090_1266 (size=5288) 2024-11-21T04:24:14,286 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/.tmp/cf/b93616a15fd6470f81963a10d04a3ae3 2024-11-21T04:24:14,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/.tmp/cf/74a7e312d57a48e6a1ef8dee3b5ce3d4 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/cf/74a7e312d57a48e6a1ef8dee3b5ce3d4 2024-11-21T04:24:14,291 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/.tmp/cf/b93616a15fd6470f81963a10d04a3ae3 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/cf/b93616a15fd6470f81963a10d04a3ae3 2024-11-21T04:24:14,296 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/cf/74a7e312d57a48e6a1ef8dee3b5ce3d4, entries=47, sequenceid=6, filesize=8.1 K 2024-11-21T04:24:14,297 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 657ef1187faef80e8de0106b2a68582c in 42ms, sequenceid=6, compaction requested=false 2024-11-21T04:24:14,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-21T04:24:14,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2603): Flush status journal for 657ef1187faef80e8de0106b2a68582c: 2024-11-21T04:24:14,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-21T04:24:14,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:14,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:14,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/cf/74a7e312d57a48e6a1ef8dee3b5ce3d4] hfiles 2024-11-21T04:24:14,299 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/cf/b93616a15fd6470f81963a10d04a3ae3, entries=3, sequenceid=6, filesize=5.2 K 2024-11-21T04:24:14,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/cf/74a7e312d57a48e6a1ef8dee3b5ce3d4 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:14,300 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 65c69c31a4fbeb4741a9756ecf63ce3d in 45ms, sequenceid=6, compaction requested=false 2024-11-21T04:24:14,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 65c69c31a4fbeb4741a9756ecf63ce3d: 2024-11-21T04:24:14,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-21T04:24:14,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:14,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:14,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/cf/b93616a15fd6470f81963a10d04a3ae3] hfiles 2024-11-21T04:24:14,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/cf/b93616a15fd6470f81963a10d04a3ae3 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:14,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742092_1268 (size=125) 2024-11-21T04:24:14,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742092_1268 (size=125) 2024-11-21T04:24:14,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742092_1268 (size=125) 2024-11-21T04:24:14,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 
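[Editor's note] Because this snapshot is type=FLUSH, each SnapshotRegionCallable above first flushes the region's memstore into a new HFile (DefaultStoreFlusher writing under .tmp, then HRegionFileSystem committing it) before adding the file to the snapshot manifest. The same store-flush step can also be driven explicitly from a client; a sketch assuming a default Configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks every region of the table to persist its memstore as an HFile,
      // the same flush-to-store-file path logged above for the two regions.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}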
2024-11-21T04:24:14,330 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=104 2024-11-21T04:24:14,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=104 2024-11-21T04:24:14,334 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:14,334 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:14,340 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 657ef1187faef80e8de0106b2a68582c in 234 msec 2024-11-21T04:24:14,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742093_1269 (size=125) 2024-11-21T04:24:14,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742093_1269 (size=125) 2024-11-21T04:24:14,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742093_1269 (size=125) 2024-11-21T04:24:14,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 
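[Editor's note] Once both SnapshotRegionProcedures report back, the master consolidates the manifest and publishes the snapshot (entries that follow). A hedged sketch of confirming the result from a client, assuming a default Configuration; the name pattern simply matches the test's snapshot prefix:

import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Lists completed snapshots whose names match the given pattern.
      for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile("snaptb0-.*"))) {
        System.out.println(sd.getName() + " -> " + sd.getTableNameAsString());
      }
    }
  }
}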
2024-11-21T04:24:14,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-21T04:24:14,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-21T04:24:14,345 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:14,345 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:14,348 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:24:14,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=102 2024-11-21T04:24:14,350 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:24:14,351 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:24:14,352 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:14,352 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d in 244 msec 2024-11-21T04:24:14,353 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:14,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742094_1270 (size=675) 2024-11-21T04:24:14,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742094_1270 (size=675) 2024-11-21T04:24:14,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742094_1270 (size=675) 2024-11-21T04:24:14,400 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:24:14,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-21T04:24:14,407 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:24:14,409 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:14,412 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:24:14,412 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-21T04:24:14,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 330 msec 2024-11-21T04:24:14,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-21T04:24:14,713 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-21T04:24:14,736 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T04:24:14,736 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T04:24:14,737 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T04:24:14,737 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56200, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T04:24:14,738 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T04:24:14,738 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:34822, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T04:24:14,739 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39347 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-21T04:24:14,739 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42393 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-21T04:24:14,739 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34363 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-21T04:24:14,741 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:24:14,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:14,743 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:24:14,743 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:14,743 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 105 2024-11-21T04:24:14,744 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:24:14,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-21T04:24:14,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742095_1271 (size=399) 2024-11-21T04:24:14,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742095_1271 (size=399) 2024-11-21T04:24:14,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742095_1271 (size=399) 2024-11-21T04:24:14,756 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bb93d80c063cb8f9905037385a95f084, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084.', STARTKEY => '', 
ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:14,757 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 6b2ef578f95b8856f03c7eec90dba5fe, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:14,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742096_1272 (size=85) 2024-11-21T04:24:14,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742097_1273 (size=85) 2024-11-21T04:24:14,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742096_1272 (size=85) 2024-11-21T04:24:14,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742096_1272 (size=85) 2024-11-21T04:24:14,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742097_1273 (size=85) 2024-11-21T04:24:14,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742097_1273 (size=85) 2024-11-21T04:24:14,767 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:14,767 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:14,767 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 6b2ef578f95b8856f03c7eec90dba5fe, disabling compactions & flushes 2024-11-21T04:24:14,767 DEBUG 
[RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing bb93d80c063cb8f9905037385a95f084, disabling compactions & flushes 2024-11-21T04:24:14,767 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. 2024-11-21T04:24:14,767 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 2024-11-21T04:24:14,767 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. 2024-11-21T04:24:14,767 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 2024-11-21T04:24:14,767 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. after waiting 0 ms 2024-11-21T04:24:14,767 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. after waiting 0 ms 2024-11-21T04:24:14,767 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. 2024-11-21T04:24:14,767 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 2024-11-21T04:24:14,767 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. 2024-11-21T04:24:14,767 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 
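
The create statement logged by HMaster above (a single 'cf' family with VERSIONS => '1' and otherwise default attributes) together with the two region descriptors (STARTKEY ''/ENDKEY '2' and STARTKEY '2'/ENDKEY '') is what CreateTableProcedure pid=105 is materializing at this point. A minimal client-side sketch of an equivalent request is shown below; the class name and the standalone connection setup are illustrative assumptions, not code taken from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateMergeTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName name =
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
          // Single family 'cf' with VERSIONS => '1'; the other attributes printed in
          // the log line (BLOOMFILTER => 'ROW', BLOCKSIZE => 65536, ...) are defaults.
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
              .setColumnFamily(
                  ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                      .setMaxVersions(1)
                      .build())
              .build();
          // One split key, '2', yields the two regions seen above:
          // (STARTKEY '', ENDKEY '2') and (STARTKEY '2', ENDKEY '').
          admin.createTable(desc, new byte[][] { Bytes.toBytes("2") });
        }
      }
    }
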
2024-11-21T04:24:14,767 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 6b2ef578f95b8856f03c7eec90dba5fe: Waiting for close lock at 1732163054767Disabling compacts and flushes for region at 1732163054767Disabling writes for close at 1732163054767Writing region close event to WAL at 1732163054767Closed at 1732163054767 2024-11-21T04:24:14,767 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for bb93d80c063cb8f9905037385a95f084: Waiting for close lock at 1732163054767Disabling compacts and flushes for region at 1732163054767Disabling writes for close at 1732163054767Writing region close event to WAL at 1732163054767Closed at 1732163054767 2024-11-21T04:24:14,768 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:24:14,768 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732163054768"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163054768"}]},"ts":"1732163054768"} 2024-11-21T04:24:14,769 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732163054768"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163054768"}]},"ts":"1732163054768"} 2024-11-21T04:24:14,770 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
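
The two MetaTableAccessor Put entries above are the hbase:meta rows written for the new regions (one info:regioninfo cell plus one info:state cell each). Once the table is enabled, the same region boundaries can be read back through the public RegionLocator API; a short sketch follows, reusing an already-open Connection, with the helper class name invented purely for illustration.

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class RegionBoundaryDump {
      // 'conn' is an already-open Connection (see the create-table sketch earlier).
      public static void print(Connection conn) throws Exception {
        TableName name =
            TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (RegionLocator locator = conn.getRegionLocator(name)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Expected here: two regions, '' -> '2' and '2' -> '', each hosted on
            // one of the three mini-cluster region servers.
            System.out.println(loc.getRegion().getEncodedName()
                + " [" + Bytes.toStringBinary(loc.getRegion().getStartKey())
                + ", " + Bytes.toStringBinary(loc.getRegion().getEndKey()) + ") on "
                + loc.getServerName());
          }
        }
      }
    }
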
2024-11-21T04:24:14,771 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:24:14,771 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163054771"}]},"ts":"1732163054771"} 2024-11-21T04:24:14,773 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-21T04:24:14,773 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:24:14,774 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:24:14,774 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:24:14,774 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:24:14,774 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:24:14,774 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:24:14,774 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:24:14,774 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:24:14,774 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:24:14,774 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:24:14,774 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:24:14,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb93d80c063cb8f9905037385a95f084, ASSIGN}, {pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6b2ef578f95b8856f03c7eec90dba5fe, ASSIGN}] 2024-11-21T04:24:14,776 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6b2ef578f95b8856f03c7eec90dba5fe, ASSIGN 2024-11-21T04:24:14,776 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb93d80c063cb8f9905037385a95f084, ASSIGN 2024-11-21T04:24:14,776 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6b2ef578f95b8856f03c7eec90dba5fe, ASSIGN; state=OFFLINE, location=fdc0a393186c,42393,1732162938268; forceNewPlan=false, 
retain=false 2024-11-21T04:24:14,776 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb93d80c063cb8f9905037385a95f084, ASSIGN; state=OFFLINE, location=fdc0a393186c,34363,1732162938185; forceNewPlan=false, retain=false 2024-11-21T04:24:14,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-21T04:24:14,927 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-21T04:24:14,927 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=6b2ef578f95b8856f03c7eec90dba5fe, regionState=OPENING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:14,927 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=bb93d80c063cb8f9905037385a95f084, regionState=OPENING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:14,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb93d80c063cb8f9905037385a95f084, ASSIGN because future has completed 2024-11-21T04:24:14,930 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure bb93d80c063cb8f9905037385a95f084, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:24:14,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6b2ef578f95b8856f03c7eec90dba5fe, ASSIGN because future has completed 2024-11-21T04:24:14,930 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b2ef578f95b8856f03c7eec90dba5fe, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:24:15,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-21T04:24:15,084 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 2024-11-21T04:24:15,084 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7752): Opening region: {ENCODED => bb93d80c063cb8f9905037385a95f084, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084.', STARTKEY => '', ENDKEY => '2'} 2024-11-21T04:24:15,085 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 
service=AccessControlService 2024-11-21T04:24:15,085 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:24:15,085 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,085 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:15,085 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7794): checking encryption for bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,085 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7797): checking classloading for bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,086 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. 2024-11-21T04:24:15,086 INFO [StoreOpener-bb93d80c063cb8f9905037385a95f084-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,086 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7752): Opening region: {ENCODED => 6b2ef578f95b8856f03c7eec90dba5fe, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe.', STARTKEY => '2', ENDKEY => ''} 2024-11-21T04:24:15,087 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. service=AccessControlService 2024-11-21T04:24:15,087 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
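
The "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" entries mean both regions are opening with ACL enforcement attached. In a normal deployment that coprocessor is wired in through site configuration; the sketch below sets the usual properties programmatically for illustration only (the mini-cluster in this test gets them from its own security setup, not from code like this).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class AccessControlConfig {
      public static Configuration withAccessController() {
        Configuration conf = HBaseConfiguration.create();
        // Turn on authorization checks and register the AccessController (the
        // coprocessor named in the log) on master, region servers, and every region.
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        return conf;
      }
    }
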
2024-11-21T04:24:15,087 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,087 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:15,087 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7794): checking encryption for 6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,087 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7797): checking classloading for 6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,088 INFO [StoreOpener-bb93d80c063cb8f9905037385a95f084-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb93d80c063cb8f9905037385a95f084 columnFamilyName cf 2024-11-21T04:24:15,088 DEBUG [StoreOpener-bb93d80c063cb8f9905037385a95f084-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:15,088 INFO [StoreOpener-bb93d80c063cb8f9905037385a95f084-1 {}] regionserver.HStore(327): Store=bb93d80c063cb8f9905037385a95f084/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:24:15,088 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1038): replaying wal for bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,089 INFO [StoreOpener-6b2ef578f95b8856f03c7eec90dba5fe-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,089 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,089 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,090 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1048): stopping wal replay for bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,090 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1060): Cleaning up temporary data for bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,090 INFO [StoreOpener-6b2ef578f95b8856f03c7eec90dba5fe-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6b2ef578f95b8856f03c7eec90dba5fe columnFamilyName cf 2024-11-21T04:24:15,090 DEBUG [StoreOpener-6b2ef578f95b8856f03c7eec90dba5fe-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:15,091 INFO [StoreOpener-6b2ef578f95b8856f03c7eec90dba5fe-1 {}] regionserver.HStore(327): Store=6b2ef578f95b8856f03c7eec90dba5fe/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:24:15,091 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1038): replaying wal for 6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,091 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1093): writing seq id for bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,092 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,092 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,092 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1048): stopping wal replay for 6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,092 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1060): Cleaning up temporary data for 6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,093 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] 
wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:24:15,093 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1093): writing seq id for 6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,093 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1114): Opened bb93d80c063cb8f9905037385a95f084; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66504177, jitterRate=-0.009010538458824158}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:24:15,094 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,094 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1006): Region open journal for bb93d80c063cb8f9905037385a95f084: Running coprocessor pre-open hook at 1732163055085Writing region info on filesystem at 1732163055085Initializing all the Stores at 1732163055086 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163055086Cleaning up temporary data from old regions at 1732163055090 (+4 ms)Running coprocessor post-open hooks at 1732163055094 (+4 ms)Region opened successfully at 1732163055094 2024-11-21T04:24:15,095 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:24:15,095 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084., pid=108, masterSystemTime=1732163055081 2024-11-21T04:24:15,095 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1114): Opened 6b2ef578f95b8856f03c7eec90dba5fe; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61080273, jitterRate=-0.08983300626277924}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:24:15,095 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,095 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1006): Region open journal for 6b2ef578f95b8856f03c7eec90dba5fe: 
Running coprocessor pre-open hook at 1732163055087Writing region info on filesystem at 1732163055088 (+1 ms)Initializing all the Stores at 1732163055088Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163055088Cleaning up temporary data from old regions at 1732163055092 (+4 ms)Running coprocessor post-open hooks at 1732163055095 (+3 ms)Region opened successfully at 1732163055095 2024-11-21T04:24:15,096 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe., pid=109, masterSystemTime=1732163055083 2024-11-21T04:24:15,097 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 2024-11-21T04:24:15,097 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 2024-11-21T04:24:15,097 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=bb93d80c063cb8f9905037385a95f084, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:15,097 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. 2024-11-21T04:24:15,097 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. 
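
With both regions now open (pids 108 and 109 above), the later locator lookups for rows '1' and '2' and the 24-byte flushes suggest the test writes one cell per region into family 'cf' with an empty qualifier. A hedged sketch of such writes follows; the value bytes and the class name are placeholders, not recovered from the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class WriteOneRowPerRegion {
      public static void write(Connection conn) throws Exception {
        TableName name =
            TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Table table = conn.getTable(name)) {
          // Row '1' lands in the ('', '2') region, row '2' in the ('2', '') region,
          // matching the AsyncNonMetaRegionLocator lookups and the two small flushes
          // later in the log. The empty qualifier mirrors the flushed cell key
          // "1/cf:/..."; the values are illustrative only.
          table.put(new Put(Bytes.toBytes("1"))
              .addColumn(Bytes.toBytes("cf"), new byte[0], Bytes.toBytes("1")));
          table.put(new Put(Bytes.toBytes("2"))
              .addColumn(Bytes.toBytes("cf"), new byte[0], Bytes.toBytes("2")));
        }
      }
    }
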
2024-11-21T04:24:15,098 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=6b2ef578f95b8856f03c7eec90dba5fe, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:15,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=108, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure bb93d80c063cb8f9905037385a95f084, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:24:15,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b2ef578f95b8856f03c7eec90dba5fe, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:24:15,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=106 2024-11-21T04:24:15,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=106, state=SUCCESS, hasLock=false; OpenRegionProcedure bb93d80c063cb8f9905037385a95f084, server=fdc0a393186c,34363,1732162938185 in 171 msec 2024-11-21T04:24:15,102 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=107 2024-11-21T04:24:15,103 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=107, state=SUCCESS, hasLock=false; OpenRegionProcedure 6b2ef578f95b8856f03c7eec90dba5fe, server=fdc0a393186c,42393,1732162938268 in 171 msec 2024-11-21T04:24:15,103 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb93d80c063cb8f9905037385a95f084, ASSIGN in 327 msec 2024-11-21T04:24:15,104 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=107, resume processing ppid=105 2024-11-21T04:24:15,104 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6b2ef578f95b8856f03c7eec90dba5fe, ASSIGN in 327 msec 2024-11-21T04:24:15,104 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:24:15,105 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163055104"}]},"ts":"1732163055104"} 2024-11-21T04:24:15,106 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-21T04:24:15,107 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:24:15,107 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-21T04:24:15,109 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-21T04:24:15,112 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-21T04:24:15,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:15,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:15,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:15,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:15,142 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:15,142 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:15,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:15,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:15,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:15,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:15,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data 
PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:15,143 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:15,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 401 msec 2024-11-21T04:24:15,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-21T04:24:15,373 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-21T04:24:15,378 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:24:15,384 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:15,387 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-21T04:24:15,399 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [bb93d80c063cb8f9905037385a95f084, 6b2ef578f95b8856f03c7eec90dba5fe] 2024-11-21T04:24:15,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[bb93d80c063cb8f9905037385a95f084, 6b2ef578f95b8856f03c7eec90dba5fe], force=true 2024-11-21T04:24:15,404 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[bb93d80c063cb8f9905037385a95f084, 6b2ef578f95b8856f03c7eec90dba5fe], force=true 2024-11-21T04:24:15,404 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[bb93d80c063cb8f9905037385a95f084, 6b2ef578f95b8856f03c7eec90dba5fe], force=true 2024-11-21T04:24:15,404 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, 
hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[bb93d80c063cb8f9905037385a95f084, 6b2ef578f95b8856f03c7eec90dba5fe], force=true 2024-11-21T04:24:15,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-21T04:24:15,416 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb93d80c063cb8f9905037385a95f084, UNASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6b2ef578f95b8856f03c7eec90dba5fe, UNASSIGN}] 2024-11-21T04:24:15,417 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb93d80c063cb8f9905037385a95f084, UNASSIGN 2024-11-21T04:24:15,417 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6b2ef578f95b8856f03c7eec90dba5fe, UNASSIGN 2024-11-21T04:24:15,418 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=bb93d80c063cb8f9905037385a95f084, regionState=CLOSING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:15,418 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=6b2ef578f95b8856f03c7eec90dba5fe, regionState=CLOSING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:15,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb93d80c063cb8f9905037385a95f084, UNASSIGN because future has completed 2024-11-21T04:24:15,420 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-21T04:24:15,420 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=113, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure bb93d80c063cb8f9905037385a95f084, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:24:15,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6b2ef578f95b8856f03c7eec90dba5fe, UNASSIGN because future has completed 2024-11-21T04:24:15,421 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-21T04:24:15,421 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=114, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6b2ef578f95b8856f03c7eec90dba5fe, 
server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:24:15,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-21T04:24:15,571 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(122): Close bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,572 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-21T04:24:15,572 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1722): Closing bb93d80c063cb8f9905037385a95f084, disabling compactions & flushes 2024-11-21T04:24:15,572 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 2024-11-21T04:24:15,572 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 2024-11-21T04:24:15,572 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. after waiting 0 ms 2024-11-21T04:24:15,572 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 2024-11-21T04:24:15,572 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2902): Flushing bb93d80c063cb8f9905037385a95f084 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-21T04:24:15,572 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(122): Close 6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,572 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-21T04:24:15,572 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1722): Closing 6b2ef578f95b8856f03c7eec90dba5fe, disabling compactions & flushes 2024-11-21T04:24:15,572 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. 2024-11-21T04:24:15,572 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. 
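
The MergeTableRegionsProcedure requested above (pid=110, force=true, regions bb93d80c... and 6b2ef578...) is now unassigning and flushing both regions before merging them. From the client side, the same merge can be requested through Admin; the sketch below assumes the multi-region mergeRegionsAsync(byte[][], boolean) overload available in recent client versions and an already-open Connection.

    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class MergeBackToOneRegion {
      public static void merge(Connection conn) throws Exception {
        TableName name =
            TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(name)) {
          List<HRegionLocation> locs = locator.getAllRegionLocations();
          // Two regions are expected at this point (bb93d80c... and 6b2ef578...).
          byte[][] encodedNames = new byte[][] {
              locs.get(0).getRegion().getEncodedNameAsBytes(),
              locs.get(1).getRegion().getEncodedNameAsBytes()
          };
          // force=true, as in the MergeTableRegionsProcedure entry above; get()
          // blocks until the master-side merge procedure completes.
          admin.mergeRegionsAsync(encodedNames, true).get();
        }
      }
    }
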
2024-11-21T04:24:15,573 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. after waiting 0 ms 2024-11-21T04:24:15,573 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. 2024-11-21T04:24:15,573 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(2902): Flushing 6b2ef578f95b8856f03c7eec90dba5fe 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-21T04:24:15,586 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/.tmp/cf/4f93e5a975764abebc46aeb1839906b1 is 28, key is 1/cf:/1732163055380/Put/seqid=0 2024-11-21T04:24:15,586 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/.tmp/cf/0f4b2e58466a46999978b599ca1b9f48 is 28, key is 2/cf:/1732163055386/Put/seqid=0 2024-11-21T04:24:15,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742099_1275 (size=4945) 2024-11-21T04:24:15,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742099_1275 (size=4945) 2024-11-21T04:24:15,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742099_1275 (size=4945) 2024-11-21T04:24:15,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742098_1274 (size=4945) 2024-11-21T04:24:15,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742098_1274 (size=4945) 2024-11-21T04:24:15,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742098_1274 (size=4945) 2024-11-21T04:24:15,592 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/.tmp/cf/4f93e5a975764abebc46aeb1839906b1 2024-11-21T04:24:15,592 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/.tmp/cf/0f4b2e58466a46999978b599ca1b9f48 2024-11-21T04:24:15,597 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/.tmp/cf/0f4b2e58466a46999978b599ca1b9f48 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/cf/0f4b2e58466a46999978b599ca1b9f48 2024-11-21T04:24:15,597 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/.tmp/cf/4f93e5a975764abebc46aeb1839906b1 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/cf/4f93e5a975764abebc46aeb1839906b1 2024-11-21T04:24:15,602 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/cf/0f4b2e58466a46999978b599ca1b9f48, entries=1, sequenceid=5, filesize=4.8 K 2024-11-21T04:24:15,602 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/cf/4f93e5a975764abebc46aeb1839906b1, entries=1, sequenceid=5, filesize=4.8 K 2024-11-21T04:24:15,603 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 6b2ef578f95b8856f03c7eec90dba5fe in 30ms, sequenceid=5, compaction requested=false 2024-11-21T04:24:15,603 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for bb93d80c063cb8f9905037385a95f084 in 31ms, sequenceid=5, compaction requested=false 2024-11-21T04:24:15,606 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T04:24:15,606 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T04:24:15,607 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:24:15,607 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:24:15,607 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 2024-11-21T04:24:15,607 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. 2024-11-21T04:24:15,607 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1676): Region close journal for bb93d80c063cb8f9905037385a95f084: Waiting for close lock at 1732163055572Running coprocessor pre-close hooks at 1732163055572Disabling compacts and flushes for region at 1732163055572Disabling writes for close at 1732163055572Obtaining lock to block concurrent updates at 1732163055572Preparing flush snapshotting stores in bb93d80c063cb8f9905037385a95f084 at 1732163055572Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732163055572Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084. 
at 1732163055573 (+1 ms)Flushing bb93d80c063cb8f9905037385a95f084/cf: creating writer at 1732163055573Flushing bb93d80c063cb8f9905037385a95f084/cf: appending metadata at 1732163055586 (+13 ms)Flushing bb93d80c063cb8f9905037385a95f084/cf: closing flushed file at 1732163055586Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e81a27d: reopening flushed file at 1732163055596 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for bb93d80c063cb8f9905037385a95f084 in 31ms, sequenceid=5, compaction requested=false at 1732163055603 (+7 ms)Writing region close event to WAL at 1732163055603Running coprocessor post-close hooks at 1732163055606 (+3 ms)Closed at 1732163055607 (+1 ms) 2024-11-21T04:24:15,607 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1676): Region close journal for 6b2ef578f95b8856f03c7eec90dba5fe: Waiting for close lock at 1732163055572Running coprocessor pre-close hooks at 1732163055572Disabling compacts and flushes for region at 1732163055572Disabling writes for close at 1732163055573 (+1 ms)Obtaining lock to block concurrent updates at 1732163055573Preparing flush snapshotting stores in 6b2ef578f95b8856f03c7eec90dba5fe at 1732163055573Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732163055573Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe. at 1732163055573Flushing 6b2ef578f95b8856f03c7eec90dba5fe/cf: creating writer at 1732163055573Flushing 6b2ef578f95b8856f03c7eec90dba5fe/cf: appending metadata at 1732163055586 (+13 ms)Flushing 6b2ef578f95b8856f03c7eec90dba5fe/cf: closing flushed file at 1732163055586Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c36b3f1: reopening flushed file at 1732163055596 (+10 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 6b2ef578f95b8856f03c7eec90dba5fe in 30ms, sequenceid=5, compaction requested=false at 1732163055603 (+7 ms)Writing region close event to WAL at 1732163055603Running coprocessor post-close hooks at 1732163055607 (+4 ms)Closed at 1732163055607 2024-11-21T04:24:15,608 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(157): Closed 6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:15,609 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=6b2ef578f95b8856f03c7eec90dba5fe, regionState=CLOSED 2024-11-21T04:24:15,609 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(157): Closed bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:15,610 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=bb93d80c063cb8f9905037385a95f084, regionState=CLOSED 2024-11-21T04:24:15,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=114, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6b2ef578f95b8856f03c7eec90dba5fe, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:24:15,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up 
procedure pid=113, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure bb93d80c063cb8f9905037385a95f084, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:24:15,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=114, resume processing ppid=112 2024-11-21T04:24:15,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, ppid=112, state=SUCCESS, hasLock=false; CloseRegionProcedure 6b2ef578f95b8856f03c7eec90dba5fe, server=fdc0a393186c,42393,1732162938268 in 191 msec 2024-11-21T04:24:15,614 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6b2ef578f95b8856f03c7eec90dba5fe, UNASSIGN in 197 msec 2024-11-21T04:24:15,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=113, resume processing ppid=111 2024-11-21T04:24:15,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, ppid=111, state=SUCCESS, hasLock=false; CloseRegionProcedure bb93d80c063cb8f9905037385a95f084, server=fdc0a393186c,34363,1732162938185 in 193 msec 2024-11-21T04:24:15,616 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-11-21T04:24:15,616 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb93d80c063cb8f9905037385a95f084, UNASSIGN in 198 msec 2024-11-21T04:24:15,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742100_1276 (size=84) 2024-11-21T04:24:15,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742100_1276 (size=84) 2024-11-21T04:24:15,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742100_1276 (size=84) 2024-11-21T04:24:15,632 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:15,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742101_1277 (size=20) 2024-11-21T04:24:15,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742101_1277 (size=20) 2024-11-21T04:24:15,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742101_1277 (size=20) 2024-11-21T04:24:15,641 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:15,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742102_1278 (size=21) 2024-11-21T04:24:15,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742102_1278 (size=21) 2024-11-21T04:24:15,647 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742102_1278 (size=21) 2024-11-21T04:24:15,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742103_1279 (size=84) 2024-11-21T04:24:15,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742103_1279 (size=84) 2024-11-21T04:24:15,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742103_1279 (size=84) 2024-11-21T04:24:15,654 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:15,664 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-21T04:24:15,666 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054741.bb93d80c063cb8f9905037385a95f084.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:15,666 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732163054741.6b2ef578f95b8856f03c7eec90dba5fe.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:15,666 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:15,679 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=263f5d8fad900dc5a7f481f25a73636a, ASSIGN}] 2024-11-21T04:24:15,680 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=263f5d8fad900dc5a7f481f25a73636a, ASSIGN 2024-11-21T04:24:15,681 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=263f5d8fad900dc5a7f481f25a73636a, ASSIGN; state=MERGED, location=fdc0a393186c,34363,1732162938185; forceNewPlan=false, retain=false 2024-11-21T04:24:15,723 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-21T04:24:15,831 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-21T04:24:15,832 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=263f5d8fad900dc5a7f481f25a73636a, regionState=OPENING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:15,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=263f5d8fad900dc5a7f481f25a73636a, ASSIGN because future has completed 2024-11-21T04:24:15,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 263f5d8fad900dc5a7f481f25a73636a, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:24:15,990 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. 2024-11-21T04:24:15,991 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7752): Opening region: {ENCODED => 263f5d8fad900dc5a7f481f25a73636a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a.', STARTKEY => '', ENDKEY => ''} 2024-11-21T04:24:15,991 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. service=AccessControlService 2024-11-21T04:24:15,991 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
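The entries up to this point show the two parent regions (bb93d80c063cb8f9905037385a95f084 and 6b2ef578f95b8856f03c7eec90dba5fe) being flushed, closed and unassigned, and the merged region 263f5d8fad900dc5a7f481f25a73636a being assigned and opened on fdc0a393186c,34363. A minimal client-side check of that outcome could list the table's regions and confirm that a single region now covers the full key range. This is a sketch only: it assumes an hbase-site.xml pointing at this mini-cluster is on the classpath, and the class name CheckMergedRegions is made up for illustration.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckMergedRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After the merge the table should be served by exactly one region
      // (263f5d8fad900dc5a7f481f25a73636a in the log above).
      List<RegionInfo> regions = admin.getRegions(table);
      for (RegionInfo region : regions) {
        System.out.println(region.getEncodedName()
            + " [" + Bytes.toStringBinary(region.getStartKey())
            + " -> " + Bytes.toStringBinary(region.getEndKey()) + "]");
      }
    }
  }
}
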
2024-11-21T04:24:15,991 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:15,991 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:15,991 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7794): checking encryption for 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:15,991 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7797): checking classloading for 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:15,993 INFO [StoreOpener-263f5d8fad900dc5a7f481f25a73636a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:15,994 INFO [StoreOpener-263f5d8fad900dc5a7f481f25a73636a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 263f5d8fad900dc5a7f481f25a73636a columnFamilyName cf 2024-11-21T04:24:15,994 DEBUG [StoreOpener-263f5d8fad900dc5a7f481f25a73636a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:16,012 DEBUG [StoreOpener-263f5d8fad900dc5a7f481f25a73636a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/cf/0f4b2e58466a46999978b599ca1b9f48.6b2ef578f95b8856f03c7eec90dba5fe->hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/cf/0f4b2e58466a46999978b599ca1b9f48-top 2024-11-21T04:24:16,017 DEBUG [StoreOpener-263f5d8fad900dc5a7f481f25a73636a-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/cf/4f93e5a975764abebc46aeb1839906b1.bb93d80c063cb8f9905037385a95f084->hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/cf/4f93e5a975764abebc46aeb1839906b1-top 2024-11-21T04:24:16,018 INFO [StoreOpener-263f5d8fad900dc5a7f481f25a73636a-1 {}] regionserver.HStore(327): Store=263f5d8fad900dc5a7f481f25a73636a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:24:16,018 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1038): replaying wal for 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:16,019 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:16,020 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:16,020 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1048): stopping wal replay for 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:16,020 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1060): Cleaning up temporary data for 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:16,022 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1093): writing seq id for 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:16,023 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1114): Opened 263f5d8fad900dc5a7f481f25a73636a; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68698274, jitterRate=0.023684054613113403}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:24:16,023 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:16,024 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1006): Region open journal for 263f5d8fad900dc5a7f481f25a73636a: Running coprocessor pre-open hook at 1732163055992Writing region info on filesystem at 1732163055992Initializing all the Stores at 1732163055993 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163055993Cleaning up temporary data from old regions at 1732163056020 (+27 ms)Running coprocessor post-open hooks at 1732163056023 (+3 ms)Region opened successfully at 1732163056024 (+1 ms) 2024-11-21T04:24:16,025 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a., pid=116, masterSystemTime=1732163055987 2024-11-21T04:24:16,025 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a.,because compaction is disabled. 2024-11-21T04:24:16,027 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. 2024-11-21T04:24:16,027 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. 2024-11-21T04:24:16,027 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=263f5d8fad900dc5a7f481f25a73636a, regionState=OPEN, openSeqNum=9, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:16,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 263f5d8fad900dc5a7f481f25a73636a, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:24:16,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=115 2024-11-21T04:24:16,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 263f5d8fad900dc5a7f481f25a73636a, server=fdc0a393186c,34363,1732162938185 in 195 msec 2024-11-21T04:24:16,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-21T04:24:16,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=110 2024-11-21T04:24:16,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=263f5d8fad900dc5a7f481f25a73636a, ASSIGN in 352 msec 2024-11-21T04:24:16,034 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[bb93d80c063cb8f9905037385a95f084, 6b2ef578f95b8856f03c7eec90dba5fe], force=true in 633 msec 2024-11-21T04:24:16,349 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
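The MergeTableRegionsProcedure (pid=110) that finishes above with force=true in 633 msec is the server-side half of a client merge request. The following is a hedged sketch of how such a merge can be requested through the public Admin API; the two encoded region names are the parents from this log, while the class name and connection setup are illustrative and not taken from the test code itself.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestRegionMerge {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Encoded names of the two parent regions seen in the log above.
      byte[][] parents = new byte[][] {
          Bytes.toBytes("bb93d80c063cb8f9905037385a95f084"),
          Bytes.toBytes("6b2ef578f95b8856f03c7eec90dba5fe")
      };
      // forcible=true mirrors the force=true flag the master logged for pid=110;
      // get() blocks until the merge procedure completes on the master.
      admin.mergeRegionsAsync(parents, true).get();
    }
  }
}
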
2024-11-21T04:24:16,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-21T04:24:16,543 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-21T04:24:16,544 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-21T04:24:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163056544 (current time:1732163056544). 2024-11-21T04:24:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:24:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-21T04:24:16,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:24:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25198793, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:16,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:16,547 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:16,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:16,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:16,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45e07860, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:16,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:16,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:16,549 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:16,550 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50646, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:16,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51756a0a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:16,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:16,554 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:16,554 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:16,556 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56204, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:16,558 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:24:16,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:16,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:16,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:16,559 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T04:24:16,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61b859fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:16,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:16,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:16,562 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:16,562 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:16,562 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:16,562 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d0e02b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:16,562 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:16,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:16,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:16,564 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50658, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:16,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eeb8489, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:16,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:16,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:16,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:16,568 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56220, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
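The snapshot request logged at 04:24:16,544 ({ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }) corresponds to a client call along the lines of the sketch below. The snapshot and table names are taken from the log; the class name and connection setup are illustrative assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // type=FLUSH in the logged request maps to SnapshotType.FLUSH; the blocking
      // snapshot() call returns once the master's SnapshotProcedure (pid=117 below) finishes.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          SnapshotType.FLUSH);
    }
  }
}
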
2024-11-21T04:24:16,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:16,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:16,571 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45662, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:16,572 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:24:16,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:16,572 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:16,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:16,572 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:24:16,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-21T04:24:16,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:24:16,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-21T04:24:16,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-21T04:24:16,575 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:24:16,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-21T04:24:16,576 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:24:16,578 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:24:16,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742104_1280 (size=216) 2024-11-21T04:24:16,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742104_1280 (size=216) 2024-11-21T04:24:16,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742104_1280 (size=216) 2024-11-21T04:24:16,587 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:24:16,587 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 263f5d8fad900dc5a7f481f25a73636a}] 2024-11-21T04:24:16,588 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:16,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-21T04:24:16,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=118 2024-11-21T04:24:16,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. 2024-11-21T04:24:16,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.HRegion(2603): Flush status journal for 263f5d8fad900dc5a7f481f25a73636a: 2024-11-21T04:24:16,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-21T04:24:16,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:16,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:16,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/cf/0f4b2e58466a46999978b599ca1b9f48.6b2ef578f95b8856f03c7eec90dba5fe->hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/cf/0f4b2e58466a46999978b599ca1b9f48-top, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/cf/4f93e5a975764abebc46aeb1839906b1.bb93d80c063cb8f9905037385a95f084->hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/cf/4f93e5a975764abebc46aeb1839906b1-top] hfiles 2024-11-21T04:24:16,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/cf/0f4b2e58466a46999978b599ca1b9f48.6b2ef578f95b8856f03c7eec90dba5fe for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:16,742 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/cf/4f93e5a975764abebc46aeb1839906b1.bb93d80c063cb8f9905037385a95f084 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:16,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742105_1281 (size=269) 2024-11-21T04:24:16,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742105_1281 (size=269) 2024-11-21T04:24:16,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742105_1281 (size=269) 2024-11-21T04:24:16,749 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. 
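With the manifest references above written, the snapshot is complete, and the export run that follows (the "HDFS export destination path" entry further down) exercises the ExportSnapshot tool. The sketch below drives that tool programmatically via ToolRunner. It is an assumption-laden illustration: the -snapshot/-copy-to flag spelling follows the documented HBase CLI usage and may differ in this 4.0.0-alpha build, and the driver class name and destination path are made up rather than taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Destination chosen for illustration; the test writes to an
    // export-test/export-<timestamp> directory under its HDFS test data root.
    String copyTo = "hdfs://localhost:44067/user/jenkins/export-test";
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", copyTo
    });
    System.exit(rc);
  }
}
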
2024-11-21T04:24:16,749 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=118 2024-11-21T04:24:16,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=118 2024-11-21T04:24:16,749 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:16,749 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:16,752 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=117 2024-11-21T04:24:16,752 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:24:16,752 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=117, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 263f5d8fad900dc5a7f481f25a73636a in 163 msec 2024-11-21T04:24:16,752 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:24:16,753 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:24:16,753 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:16,753 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:16,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742106_1282 (size=670) 2024-11-21T04:24:16,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742106_1282 (size=670) 2024-11-21T04:24:16,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742106_1282 (size=670) 2024-11-21T04:24:16,764 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:24:16,769 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:24:16,770 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:16,771 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:24:16,771 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-21T04:24:16,772 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 198 msec 2024-11-21T04:24:16,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-21T04:24:16,893 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-21T04:24:16,893 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163056893 2024-11-21T04:24:16,893 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44067, tgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163056893, rawTgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163056893, srcFsUri=hdfs://localhost:44067, srcDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:16,921 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44067, inputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:16,921 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163056893, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163056893/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:16,923 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T04:24:16,928 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163056893/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:16,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742108_1284 (size=670) 2024-11-21T04:24:16,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742108_1284 (size=670) 2024-11-21T04:24:16,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742107_1283 (size=216) 2024-11-21T04:24:16,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742107_1283 (size=216) 2024-11-21T04:24:16,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742107_1283 (size=216) 2024-11-21T04:24:16,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742108_1284 (size=670) 2024-11-21T04:24:17,049 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0005_000001 (auth:SIMPLE) from 127.0.0.1:53372 2024-11-21T04:24:17,059 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0005/container_1732162945431_0005_01_000001/launch_container.sh] 2024-11-21T04:24:17,059 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0005/container_1732162945431_0005_01_000001/container_tokens] 2024-11-21T04:24:17,059 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0005/container_1732162945431_0005_01_000001/sysfs] 2024-11-21T04:24:17,347 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:17,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:17,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:17,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:17,560 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-21T04:24:17,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:17,560 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-21T04:24:17,561 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-21T04:24:18,230 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:24:18,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-15880240062160864229.jar 2024-11-21T04:24:18,269 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:18,270 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:18,328 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-1780550618826883333.jar 2024-11-21T04:24:18,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:18,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:18,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:18,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:18,329 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:18,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:18,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T04:24:18,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T04:24:18,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T04:24:18,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T04:24:18,330 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T04:24:18,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T04:24:18,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T04:24:18,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T04:24:18,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T04:24:18,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T04:24:18,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T04:24:18,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:24:18,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:24:18,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:24:18,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:24:18,332 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:24:18,333 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:24:18,333 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
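The long run of "For class ..., using jar ..." DEBUG lines above is TableMapReduceUtil working out which local jar carries each class the MapReduce tasks will need and attaching those jars to the job. As a rough illustration only (the class and job names below are invented; the API calls are the standard public HBase/Hadoop ones), a driver typically triggers that resolution like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jars-sketch"); // hypothetical job name
        // Resolve the jar that ships each required class and add it to the job's
        // distributed cache ("tmpjars"); resolution of this kind is what emits the
        // "For class ..., using jar ..." DEBUG lines seen above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }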
2024-11-21T04:24:18,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742109_1285 (size=131440) 2024-11-21T04:24:18,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742109_1285 (size=131440) 2024-11-21T04:24:18,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742109_1285 (size=131440) 2024-11-21T04:24:18,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742110_1286 (size=4188619) 2024-11-21T04:24:18,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742110_1286 (size=4188619) 2024-11-21T04:24:18,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742110_1286 (size=4188619) 2024-11-21T04:24:18,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742111_1287 (size=1323991) 2024-11-21T04:24:18,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742111_1287 (size=1323991) 2024-11-21T04:24:18,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742111_1287 (size=1323991) 2024-11-21T04:24:18,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742112_1288 (size=903734) 2024-11-21T04:24:18,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742112_1288 (size=903734) 2024-11-21T04:24:18,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742112_1288 (size=903734) 2024-11-21T04:24:18,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742113_1289 (size=8360083) 2024-11-21T04:24:18,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742113_1289 (size=8360083) 2024-11-21T04:24:18,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742113_1289 (size=8360083) 2024-11-21T04:24:18,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742114_1290 (size=6424740) 2024-11-21T04:24:18,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742114_1290 (size=6424740) 2024-11-21T04:24:18,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742114_1290 (size=6424740) 2024-11-21T04:24:18,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742115_1291 (size=1877034) 2024-11-21T04:24:18,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to 
blk_1073742115_1291 (size=1877034) 2024-11-21T04:24:18,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742115_1291 (size=1877034) 2024-11-21T04:24:18,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742116_1292 (size=77835) 2024-11-21T04:24:18,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742116_1292 (size=77835) 2024-11-21T04:24:18,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742116_1292 (size=77835) 2024-11-21T04:24:18,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742117_1293 (size=30949) 2024-11-21T04:24:18,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742117_1293 (size=30949) 2024-11-21T04:24:18,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742117_1293 (size=30949) 2024-11-21T04:24:18,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742118_1294 (size=1597327) 2024-11-21T04:24:18,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742118_1294 (size=1597327) 2024-11-21T04:24:18,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742118_1294 (size=1597327) 2024-11-21T04:24:18,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742119_1295 (size=4695811) 2024-11-21T04:24:18,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742119_1295 (size=4695811) 2024-11-21T04:24:18,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742119_1295 (size=4695811) 2024-11-21T04:24:18,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742120_1296 (size=232957) 2024-11-21T04:24:18,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742120_1296 (size=232957) 2024-11-21T04:24:18,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742120_1296 (size=232957) 2024-11-21T04:24:18,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742121_1297 (size=127628) 2024-11-21T04:24:18,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742121_1297 (size=127628) 2024-11-21T04:24:18,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742121_1297 (size=127628) 2024-11-21T04:24:18,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is 
added to blk_1073742122_1298 (size=20406) 2024-11-21T04:24:18,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742122_1298 (size=20406) 2024-11-21T04:24:18,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742122_1298 (size=20406) 2024-11-21T04:24:18,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742123_1299 (size=5175431) 2024-11-21T04:24:18,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742123_1299 (size=5175431) 2024-11-21T04:24:18,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742123_1299 (size=5175431) 2024-11-21T04:24:18,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742124_1300 (size=217634) 2024-11-21T04:24:18,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742124_1300 (size=217634) 2024-11-21T04:24:18,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742124_1300 (size=217634) 2024-11-21T04:24:18,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742125_1301 (size=1832290) 2024-11-21T04:24:18,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742125_1301 (size=1832290) 2024-11-21T04:24:18,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742125_1301 (size=1832290) 2024-11-21T04:24:18,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742126_1302 (size=322274) 2024-11-21T04:24:18,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742126_1302 (size=322274) 2024-11-21T04:24:18,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742126_1302 (size=322274) 2024-11-21T04:24:18,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742127_1303 (size=503880) 2024-11-21T04:24:18,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742127_1303 (size=503880) 2024-11-21T04:24:18,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742127_1303 (size=503880) 2024-11-21T04:24:18,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742128_1304 (size=29229) 2024-11-21T04:24:18,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742128_1304 (size=29229) 2024-11-21T04:24:18,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42055 is added to blk_1073742128_1304 (size=29229) 2024-11-21T04:24:18,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742129_1305 (size=24096) 2024-11-21T04:24:18,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742129_1305 (size=24096) 2024-11-21T04:24:18,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742129_1305 (size=24096) 2024-11-21T04:24:18,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742130_1306 (size=111872) 2024-11-21T04:24:18,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742130_1306 (size=111872) 2024-11-21T04:24:18,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742130_1306 (size=111872) 2024-11-21T04:24:18,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742131_1307 (size=440656) 2024-11-21T04:24:18,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742131_1307 (size=440656) 2024-11-21T04:24:18,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742131_1307 (size=440656) 2024-11-21T04:24:18,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742132_1308 (size=45609) 2024-11-21T04:24:18,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742132_1308 (size=45609) 2024-11-21T04:24:18,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742132_1308 (size=45609) 2024-11-21T04:24:18,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742133_1309 (size=136454) 2024-11-21T04:24:18,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742133_1309 (size=136454) 2024-11-21T04:24:18,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742133_1309 (size=136454) 2024-11-21T04:24:18,632 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
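The WARN just above ("No job jar file set. User classes may not be found. See Job or Job#setJar(String).") appears when a job is submitted without a job jar. In this test the needed classes look like they are shipped as dependency jars instead, so the warning is probably benign here; for a standalone driver the usual remedy is the call shown in this minimal sketch (driver class name is invented):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSketch {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "job-jar-sketch"); // hypothetical name
        // Point Hadoop at the jar containing the user classes so task JVMs can load
        // them; leaving both Job#setJar and Job#setJarByClass unset is what produces
        // the "No job jar file set" WARN above.
        job.setJarByClass(JobJarSketch.class);
        // ... configure mapper/reducer/input/output, then job.waitForCompletion(true)
      }
    }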
2024-11-21T04:24:18,634 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-21T04:24:18,635 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=9.7 K 2024-11-21T04:24:18,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742134_1310 (size=378) 2024-11-21T04:24:18,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742134_1310 (size=378) 2024-11-21T04:24:18,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742134_1310 (size=378) 2024-11-21T04:24:18,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742135_1311 (size=15) 2024-11-21T04:24:18,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742135_1311 (size=15) 2024-11-21T04:24:18,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742135_1311 (size=15) 2024-11-21T04:24:18,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742136_1312 (size=303791) 2024-11-21T04:24:18,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742136_1312 (size=303791) 2024-11-21T04:24:18,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742136_1312 (size=303791) 2024-11-21T04:24:18,685 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T04:24:18,685 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T04:24:19,055 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0006_000001 (auth:SIMPLE) from 127.0.0.1:53382 2024-11-21T04:24:20,118 WARN [regionserver/fdc0a393186c:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 4, running: 1 2024-11-21T04:24:21,368 DEBUG [master/fdc0a393186c:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 65c69c31a4fbeb4741a9756ecf63ce3d changed from -1.0 to 0.0, refreshing cache 2024-11-21T04:24:21,368 DEBUG [master/fdc0a393186c:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 657ef1187faef80e8de0106b2a68582c changed from -1.0 to 0.0, refreshing cache 2024-11-21T04:24:24,284 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0006_000001 (auth:SIMPLE) from 127.0.0.1:50826 2024-11-21T04:24:24,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742137_1313 (size=349441) 2024-11-21T04:24:24,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742137_1313 (size=349441) 2024-11-21T04:24:24,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742137_1313 (size=349441) 2024-11-21T04:24:26,551 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0006_000001 (auth:SIMPLE) from 127.0.0.1:36178 2024-11-21T04:24:30,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742138_1314 (size=4945) 2024-11-21T04:24:30,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742138_1314 (size=4945) 2024-11-21T04:24:30,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742138_1314 (size=4945) 2024-11-21T04:24:30,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742139_1315 (size=4945) 2024-11-21T04:24:30,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742139_1315 (size=4945) 2024-11-21T04:24:30,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742139_1315 (size=4945) 2024-11-21T04:24:30,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742140_1316 (size=17474) 2024-11-21T04:24:30,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742140_1316 (size=17474) 2024-11-21T04:24:30,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742140_1316 (size=17474) 2024-11-21T04:24:30,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742141_1317 (size=482) 2024-11-21T04:24:30,145 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742141_1317 (size=482) 2024-11-21T04:24:30,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742141_1317 (size=482) 2024-11-21T04:24:30,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742142_1318 (size=17474) 2024-11-21T04:24:30,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742142_1318 (size=17474) 2024-11-21T04:24:30,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742142_1318 (size=17474) 2024-11-21T04:24:30,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742143_1319 (size=349441) 2024-11-21T04:24:30,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742143_1319 (size=349441) 2024-11-21T04:24:30,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742143_1319 (size=349441) 2024-11-21T04:24:30,208 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0006_000001 (auth:SIMPLE) from 127.0.0.1:36194 2024-11-21T04:24:30,221 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732162945431_0006_01_000002 is : 143 2024-11-21T04:24:30,232 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0006/container_1732162945431_0006_01_000002/launch_container.sh] 2024-11-21T04:24:30,232 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0006/container_1732162945431_0006_01_000002/container_tokens] 2024-11-21T04:24:30,232 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0006/container_1732162945431_0006_01_000002/sysfs] 2024-11-21T04:24:32,033 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T04:24:32,035 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
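The entries from "Verify the source snapshot's expiration status and integrity" through "Finalize the Snapshot Export" and the verification line above trace one ExportSnapshot run: snaptb0-testExportFileSystemStateWithMergeRegion-1 is copied from the cluster root into the export-test directory. A sketch of how such an export is commonly launched programmatically follows; the flag names are the usual ExportSnapshot options and should be checked against the HBase version in use, and the snapshot name and target path are copied from the log purely for illustration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Roughly what `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot ...` does.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] {
                "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
                "-copy-to",
                "hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163056893",
                "-mappers", "1"
            });
        System.exit(rc);
      }
    }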
2024-11-21T04:24:32,042 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,042 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T04:24:32,043 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T04:24:32,043 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,043 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-21T04:24:32,043 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-21T04:24:32,043 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163056893/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163056893/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,044 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163056893/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-21T04:24:32,044 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163056893/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-21T04:24:32,050 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-21T04:24:32,053 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163072053"}]},"ts":"1732163072053"} 2024-11-21T04:24:32,055 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-21T04:24:32,055 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-21T04:24:32,056 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-21T04:24:32,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=263f5d8fad900dc5a7f481f25a73636a, UNASSIGN}] 2024-11-21T04:24:32,058 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=263f5d8fad900dc5a7f481f25a73636a, UNASSIGN 2024-11-21T04:24:32,059 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=263f5d8fad900dc5a7f481f25a73636a, regionState=CLOSING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:32,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=263f5d8fad900dc5a7f481f25a73636a, UNASSIGN because future has completed 2024-11-21T04:24:32,060 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:24:32,061 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 263f5d8fad900dc5a7f481f25a73636a, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:24:32,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-21T04:24:32,213 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(122): Close 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:32,213 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:24:32,213 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1722): Closing 263f5d8fad900dc5a7f481f25a73636a, disabling compactions & flushes 2024-11-21T04:24:32,213 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. 2024-11-21T04:24:32,213 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. 
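The entries around here record the client-initiated disable of testtb-testExportFileSystemStateWithMergeRegion-1 (DisableTableProcedure pid=119) and the region server closing region 263f5d8fad900dc5a7f481f25a73636a. From the client side that whole sequence is normally driven by a single Admin call; a minimal sketch using the standard synchronous Admin API (connection setup here stands in for whatever configuration the test actually uses):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
          // Blocks until the master's DisableTableProcedure, including the region
          // close subprocedures logged above, has finished.
          admin.disableTable(table);
        }
      }
    }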
2024-11-21T04:24:32,213 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. after waiting 0 ms 2024-11-21T04:24:32,213 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. 2024-11-21T04:24:32,242 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-21T04:24:32,242 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:24:32,242 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a. 2024-11-21T04:24:32,242 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1676): Region close journal for 263f5d8fad900dc5a7f481f25a73636a: Waiting for close lock at 1732163072213Running coprocessor pre-close hooks at 1732163072213Disabling compacts and flushes for region at 1732163072213Disabling writes for close at 1732163072213Writing region close event to WAL at 1732163072236 (+23 ms)Running coprocessor post-close hooks at 1732163072242 (+6 ms)Closed at 1732163072242 2024-11-21T04:24:32,244 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(157): Closed 263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:32,245 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=263f5d8fad900dc5a7f481f25a73636a, regionState=CLOSED 2024-11-21T04:24:32,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 263f5d8fad900dc5a7f481f25a73636a, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:24:32,249 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=122, resume processing ppid=121 2024-11-21T04:24:32,249 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, ppid=121, state=SUCCESS, hasLock=false; CloseRegionProcedure 263f5d8fad900dc5a7f481f25a73636a, server=fdc0a393186c,34363,1732162938185 in 187 msec 2024-11-21T04:24:32,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=120 2024-11-21T04:24:32,250 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=120, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=263f5d8fad900dc5a7f481f25a73636a, UNASSIGN in 192 msec 2024-11-21T04:24:32,265 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=120, resume processing ppid=119 2024-11-21T04:24:32,265 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 195 msec 2024-11-21T04:24:32,268 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163072268"}]},"ts":"1732163072268"} 2024-11-21T04:24:32,272 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-21T04:24:32,272 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-21T04:24:32,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 223 msec 2024-11-21T04:24:32,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-21T04:24:32,372 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-21T04:24:32,372 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,375 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,376 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=123, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,378 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,379 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:32,381 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/cf, FileablePath, 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/recovered.edits] 2024-11-21T04:24:32,386 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/cf/0f4b2e58466a46999978b599ca1b9f48.6b2ef578f95b8856f03c7eec90dba5fe to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/cf/0f4b2e58466a46999978b599ca1b9f48.6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:32,387 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:32,387 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:32,387 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/cf/4f93e5a975764abebc46aeb1839906b1.bb93d80c063cb8f9905037385a95f084 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/cf/4f93e5a975764abebc46aeb1839906b1.bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:32,388 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/recovered.edits] 2024-11-21T04:24:32,389 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/recovered.edits] 2024-11-21T04:24:32,393 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/cf/4f93e5a975764abebc46aeb1839906b1 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/cf/4f93e5a975764abebc46aeb1839906b1 2024-11-21T04:24:32,393 DEBUG [HFileArchiver-11 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/recovered.edits/12.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a/recovered.edits/12.seqid 2024-11-21T04:24:32,394 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/263f5d8fad900dc5a7f481f25a73636a 2024-11-21T04:24:32,394 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/cf/0f4b2e58466a46999978b599ca1b9f48 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/cf/0f4b2e58466a46999978b599ca1b9f48 2024-11-21T04:24:32,397 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/recovered.edits/8.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084/recovered.edits/8.seqid 2024-11-21T04:24:32,397 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb93d80c063cb8f9905037385a95f084 2024-11-21T04:24:32,397 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/recovered.edits/8.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe/recovered.edits/8.seqid 2024-11-21T04:24:32,398 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6b2ef578f95b8856f03c7eec90dba5fe 2024-11-21T04:24:32,398 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-21T04:24:32,400 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=123, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,404 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-21T04:24:32,409 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 
'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-21T04:24:32,411 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=123, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,411 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-11-21T04:24:32,412 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163072411"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:32,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,415 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-21T04:24:32,415 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-21T04:24:32,416 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-21T04:24:32,416 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-21T04:24:32,417 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-21T04:24:32,417 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 263f5d8fad900dc5a7f481f25a73636a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a.', STARTKEY => '', ENDKEY => ''}] 2024-11-21T04:24:32,417 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
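Once the disable completes, the log shows the matching DeleteTableProcedure (pid=123): HFileArchiver moves the region files into the archive directory, the region and table-state rows are removed from hbase:meta, and the table's ACL entries are cleared via ZooKeeper. On the client side this corresponds to a delete call that is only legal once the table is disabled; a hedged sketch, reusing the connection pattern above:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
          // deleteTable is only accepted for a disabled table; the master otherwise
          // rejects the request with TableNotDisabledException.
          if (admin.isTableDisabled(table)) {
            admin.deleteTable(table);
          }
        }
      }
    }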
2024-11-21T04:24:32,417 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732163072417"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:32,420 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-21T04:24:32,422 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=123, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,423 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 49 msec 2024-11-21T04:24:32,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:32,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:32,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:32,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:32,429 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:32,429 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:32,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=123 2024-11-21T04:24:32,430 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,430 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-21T04:24:32,430 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,431 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:32,431 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:32,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-21T04:24:32,435 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163072435"}]},"ts":"1732163072435"} 2024-11-21T04:24:32,437 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-21T04:24:32,437 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-21T04:24:32,438 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-21T04:24:32,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65c69c31a4fbeb4741a9756ecf63ce3d, UNASSIGN}, {pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=657ef1187faef80e8de0106b2a68582c, UNASSIGN}] 2024-11-21T04:24:32,442 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65c69c31a4fbeb4741a9756ecf63ce3d, UNASSIGN 2024-11-21T04:24:32,442 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=657ef1187faef80e8de0106b2a68582c, UNASSIGN 2024-11-21T04:24:32,443 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=657ef1187faef80e8de0106b2a68582c, regionState=CLOSING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:24:32,443 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=65c69c31a4fbeb4741a9756ecf63ce3d, regionState=CLOSING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:32,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=657ef1187faef80e8de0106b2a68582c, UNASSIGN because future has completed 2024-11-21T04:24:32,447 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:24:32,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 657ef1187faef80e8de0106b2a68582c, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:24:32,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65c69c31a4fbeb4741a9756ecf63ce3d, UNASSIGN because future has completed 2024-11-21T04:24:32,449 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:24:32,449 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:24:32,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-21T04:24:32,601 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(122): Close 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:32,602 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:24:32,602 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1722): Closing 657ef1187faef80e8de0106b2a68582c, disabling compactions & flushes 2024-11-21T04:24:32,602 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1755): 
Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 2024-11-21T04:24:32,602 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 2024-11-21T04:24:32,602 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. after waiting 0 ms 2024-11-21T04:24:32,602 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 2024-11-21T04:24:32,602 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:32,602 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:24:32,603 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 65c69c31a4fbeb4741a9756ecf63ce3d, disabling compactions & flushes 2024-11-21T04:24:32,603 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 2024-11-21T04:24:32,603 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 2024-11-21T04:24:32,603 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. after waiting 0 ms 2024-11-21T04:24:32,603 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 2024-11-21T04:24:32,630 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:24:32,631 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:24:32,631 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d. 
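Note: at this point DisableTableProcedure (pid=124) has fanned out into per-region TransitRegionStateProcedure / CloseRegionProcedure children, the region servers are closing both regions, and the client keeps asking the master whether pid=124 is done. A hedged sketch of what that client-side wait typically looks like with the asynchronous Admin API (class name and timeout are illustrative, not taken from the test):

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableAndWait {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // disableTableAsync submits a DisableTableProcedure on the master and
      // returns a Future; waiting on it is what drives the periodic
      // "Checking to see if procedure is done pid=..." calls in the log.
      Future<Void> pending = admin.disableTableAsync(table);
      pending.get(5, TimeUnit.MINUTES);
      System.out.println("disabled: " + admin.isTableDisabled(table));
    }
  }
}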
2024-11-21T04:24:32,631 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 65c69c31a4fbeb4741a9756ecf63ce3d: Waiting for close lock at 1732163072603Running coprocessor pre-close hooks at 1732163072603Disabling compacts and flushes for region at 1732163072603Disabling writes for close at 1732163072603Writing region close event to WAL at 1732163072611 (+8 ms)Running coprocessor post-close hooks at 1732163072631 (+20 ms)Closed at 1732163072631 2024-11-21T04:24:32,635 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:24:32,639 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:24:32,639 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c. 2024-11-21T04:24:32,639 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1676): Region close journal for 657ef1187faef80e8de0106b2a68582c: Waiting for close lock at 1732163072602Running coprocessor pre-close hooks at 1732163072602Disabling compacts and flushes for region at 1732163072602Disabling writes for close at 1732163072602Writing region close event to WAL at 1732163072610 (+8 ms)Running coprocessor post-close hooks at 1732163072639 (+29 ms)Closed at 1732163072639 2024-11-21T04:24:32,640 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:32,640 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=65c69c31a4fbeb4741a9756ecf63ce3d, regionState=CLOSED 2024-11-21T04:24:32,643 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(157): Closed 657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:32,643 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=657ef1187faef80e8de0106b2a68582c, regionState=CLOSED 2024-11-21T04:24:32,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:24:32,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 657ef1187faef80e8de0106b2a68582c, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:24:32,652 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=126 2024-11-21T04:24:32,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=126, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 65c69c31a4fbeb4741a9756ecf63ce3d, server=fdc0a393186c,34363,1732162938185 in 197 msec 2024-11-21T04:24:32,655 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=127 2024-11-21T04:24:32,655 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 657ef1187faef80e8de0106b2a68582c, server=fdc0a393186c,39347,1732162938035 in 203 msec 2024-11-21T04:24:32,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65c69c31a4fbeb4741a9756ecf63ce3d, UNASSIGN in 212 msec 2024-11-21T04:24:32,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=125 2024-11-21T04:24:32,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=657ef1187faef80e8de0106b2a68582c, UNASSIGN in 214 msec 2024-11-21T04:24:32,663 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=125, resume processing ppid=124 2024-11-21T04:24:32,663 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, ppid=124, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 222 msec 2024-11-21T04:24:32,669 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163072669"}]},"ts":"1732163072669"} 2024-11-21T04:24:32,671 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-21T04:24:32,672 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-21T04:24:32,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 242 msec 2024-11-21T04:24:32,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-21T04:24:32,753 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-21T04:24:32,753 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,755 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,755 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,756 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,758 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,760 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:32,760 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:32,761 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/recovered.edits] 2024-11-21T04:24:32,762 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/recovered.edits] 2024-11-21T04:24:32,766 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/cf/74a7e312d57a48e6a1ef8dee3b5ce3d4 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/cf/74a7e312d57a48e6a1ef8dee3b5ce3d4 2024-11-21T04:24:32,766 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/cf/b93616a15fd6470f81963a10d04a3ae3 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/cf/b93616a15fd6470f81963a10d04a3ae3 2024-11-21T04:24:32,769 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d/recovered.edits/9.seqid 2024-11-21T04:24:32,769 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c/recovered.edits/9.seqid 2024-11-21T04:24:32,769 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/657ef1187faef80e8de0106b2a68582c 2024-11-21T04:24:32,770 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithMergeRegion/65c69c31a4fbeb4741a9756ecf63ce3d 2024-11-21T04:24:32,770 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-21T04:24:32,772 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,776 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-21T04:24:32,779 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-21T04:24:32,781 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,781 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
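Note: the HFileArchiver entries above show each region's store file and recovered.edits being moved under archive/data/default/<table>/<region>/ before the original region directories are deleted. The sketch below simply lists whatever landed under that archive path; the hdfs:// root is copied from this run's log (it differs per run), and the three-level loop mirrors the <region>/<family-or-recovered.edits>/<file> layout observed here, so treat it as illustrative rather than a fixed contract.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Root taken from this log; in practice derive it from hbase.rootdir.
    Path archived = new Path(
        "hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/"
            + "archive/data/default/testtb-testExportFileSystemStateWithMergeRegion");
    FileSystem fs = archived.getFileSystem(conf);
    // Walk region dir -> family (or recovered.edits) dir -> archived file.
    for (FileStatus region : fs.listStatus(archived)) {
      for (FileStatus family : fs.listStatus(region.getPath())) {
        for (FileStatus file : fs.listStatus(family.getPath())) {
          System.out.println(file.getPath());
        }
      }
    }
  }
}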
2024-11-21T04:24:32,781 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163072781"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:32,781 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163072781"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:32,785 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T04:24:32,785 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 65c69c31a4fbeb4741a9756ecf63ce3d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732163053013.65c69c31a4fbeb4741a9756ecf63ce3d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 657ef1187faef80e8de0106b2a68582c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732163053013.657ef1187faef80e8de0106b2a68582c.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T04:24:32,785 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-11-21T04:24:32,785 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732163072785"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:32,787 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-21T04:24:32,788 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,790 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 35 msec 2024-11-21T04:24:32,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,832 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-21T04:24:32,832 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-21T04:24:32,832 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-21T04:24:32,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:32,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:32,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:32,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data null 2024-11-21T04:24:32,840 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-21T04:24:32,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:32,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-21T04:24:32,845 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,845 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-21T04:24:32,862 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" 
type: DISABLED 2024-11-21T04:24:32,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,866 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-21T04:24:32,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:32,869 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-21T04:24:32,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:32,896 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=805 (was 793) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_562417925_1 at /127.0.0.1:42582 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36893 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 128244) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:51382 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:60498 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (625843813) connection to localhost/127.0.0.1:36893 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:54540 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4390 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/fdc0a393186c:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/fdc0a393186c:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/fdc0a393186c:0-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=804 (was 802) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=547 (was 620), ProcessCount=23 (was 22) - ProcessCount LEAK? -, AvailableMemoryMB=723 (was 816) 2024-11-21T04:24:32,896 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-21T04:24:32,913 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=805, OpenFileDescriptor=804, MaxFileDescriptor=1048576, SystemLoadAverage=547, ProcessCount=23, AvailableMemoryMB=718 2024-11-21T04:24:32,913 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-21T04:24:32,915 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:24:32,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T04:24:32,917 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:24:32,917 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:32,917 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 131 2024-11-21T04:24:32,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-21T04:24:32,919 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, 
state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:24:32,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742144_1320 (size=407) 2024-11-21T04:24:32,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742144_1320 (size=407) 2024-11-21T04:24:32,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742144_1320 (size=407) 2024-11-21T04:24:32,930 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bd674b740b7e4d7818a9c1d139f861b9, NAME => 'testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:32,935 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 420a7d1da52b47aef21498fe0f4727b7, NAME => 'testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742145_1321 (size=68) 2024-11-21T04:24:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742145_1321 (size=68) 2024-11-21T04:24:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742145_1321 (size=68) 2024-11-21T04:24:32,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742146_1322 (size=68) 2024-11-21T04:24:32,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742146_1322 (size=68) 2024-11-21T04:24:32,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742146_1322 (size=68) 2024-11-21T04:24:32,959 DEBUG 
[RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:32,959 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 420a7d1da52b47aef21498fe0f4727b7, disabling compactions & flushes 2024-11-21T04:24:32,959 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:32,959 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:32,959 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. after waiting 0 ms 2024-11-21T04:24:32,959 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:32,959 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:32,959 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 420a7d1da52b47aef21498fe0f4727b7: Waiting for close lock at 1732163072959Disabling compacts and flushes for region at 1732163072959Disabling writes for close at 1732163072959Writing region close event to WAL at 1732163072959Closed at 1732163072959 2024-11-21T04:24:33,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-21T04:24:33,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-21T04:24:33,351 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:33,351 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing bd674b740b7e4d7818a9c1d139f861b9, disabling compactions & flushes 2024-11-21T04:24:33,351 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 2024-11-21T04:24:33,351 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 
2024-11-21T04:24:33,351 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. after waiting 0 ms 2024-11-21T04:24:33,351 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 2024-11-21T04:24:33,351 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 2024-11-21T04:24:33,351 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for bd674b740b7e4d7818a9c1d139f861b9: Waiting for close lock at 1732163073351Disabling compacts and flushes for region at 1732163073351Disabling writes for close at 1732163073351Writing region close event to WAL at 1732163073351Closed at 1732163073351 2024-11-21T04:24:33,353 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:24:33,353 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732163073353"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163073353"}]},"ts":"1732163073353"} 2024-11-21T04:24:33,353 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732163073353"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163073353"}]},"ts":"1732163073353"} 2024-11-21T04:24:33,355 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
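For reference, the create request logged above (table 'testtb-testExportExpiredSnapshot', one 'cf' family with the logged defaults, split at '1') could be issued from a client roughly as in the sketch below; the class name, configuration handling, and the choice to rely on default column-family settings are illustrative assumptions, not taken from the test source.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateExportTestTable {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // One family 'cf' with defaults comparable to the descriptor logged above
          // (VERSIONS => 1, no compression, 64 KB block size).
          admin.createTable(
              TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                  .build(),
              // A single split point '1' yields the two regions seen in the log:
              // ('' .. '1') and ('1' .. '').
              new byte[][] { Bytes.toBytes("1") });
        }
      }
    }

The assignment of those two regions then continues in the records that follow.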
2024-11-21T04:24:33,356 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:24:33,357 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163073356"}]},"ts":"1732163073356"} 2024-11-21T04:24:33,358 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-21T04:24:33,359 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:24:33,360 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:24:33,360 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:24:33,360 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:24:33,360 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:24:33,360 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:24:33,360 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:24:33,360 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:24:33,360 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:24:33,360 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:24:33,360 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:24:33,360 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bd674b740b7e4d7818a9c1d139f861b9, ASSIGN}, {pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=420a7d1da52b47aef21498fe0f4727b7, ASSIGN}] 2024-11-21T04:24:33,361 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=420a7d1da52b47aef21498fe0f4727b7, ASSIGN 2024-11-21T04:24:33,361 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bd674b740b7e4d7818a9c1d139f861b9, ASSIGN 2024-11-21T04:24:33,362 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=420a7d1da52b47aef21498fe0f4727b7, ASSIGN; state=OFFLINE, location=fdc0a393186c,42393,1732162938268; forceNewPlan=false, retain=false 2024-11-21T04:24:33,362 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=132, ppid=131, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bd674b740b7e4d7818a9c1d139f861b9, ASSIGN; state=OFFLINE, location=fdc0a393186c,34363,1732162938185; forceNewPlan=false, retain=false 2024-11-21T04:24:33,512 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-21T04:24:33,513 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=420a7d1da52b47aef21498fe0f4727b7, regionState=OPENING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:33,513 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=bd674b740b7e4d7818a9c1d139f861b9, regionState=OPENING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:33,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=420a7d1da52b47aef21498fe0f4727b7, ASSIGN because future has completed 2024-11-21T04:24:33,515 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=134, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 420a7d1da52b47aef21498fe0f4727b7, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:24:33,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bd674b740b7e4d7818a9c1d139f861b9, ASSIGN because future has completed 2024-11-21T04:24:33,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd674b740b7e4d7818a9c1d139f861b9, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:24:33,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-21T04:24:33,670 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:33,670 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 
2024-11-21T04:24:33,670 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => bd674b740b7e4d7818a9c1d139f861b9, NAME => 'testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T04:24:33,670 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7752): Opening region: {ENCODED => 420a7d1da52b47aef21498fe0f4727b7, NAME => 'testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T04:24:33,671 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. service=AccessControlService 2024-11-21T04:24:33,671 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. service=AccessControlService 2024-11-21T04:24:33,671 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:24:33,671 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:24:33,671 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:33,671 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:33,671 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:33,671 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:33,671 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7794): checking encryption for 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:33,671 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:33,671 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] 
regionserver.HRegion(7797): checking classloading for bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:33,671 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7797): checking classloading for 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:33,675 INFO [StoreOpener-420a7d1da52b47aef21498fe0f4727b7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:33,677 INFO [StoreOpener-420a7d1da52b47aef21498fe0f4727b7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 420a7d1da52b47aef21498fe0f4727b7 columnFamilyName cf 2024-11-21T04:24:33,677 DEBUG [StoreOpener-420a7d1da52b47aef21498fe0f4727b7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:33,678 INFO [StoreOpener-420a7d1da52b47aef21498fe0f4727b7-1 {}] regionserver.HStore(327): Store=420a7d1da52b47aef21498fe0f4727b7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:24:33,678 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1038): replaying wal for 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:33,679 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:33,679 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:33,679 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1048): stopping wal replay for 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:33,679 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1060): Cleaning up temporary data for 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:33,681 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1093): writing seq id for 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:33,683 INFO [StoreOpener-bd674b740b7e4d7818a9c1d139f861b9-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:33,684 INFO [StoreOpener-bd674b740b7e4d7818a9c1d139f861b9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bd674b740b7e4d7818a9c1d139f861b9 columnFamilyName cf 2024-11-21T04:24:33,684 DEBUG [StoreOpener-bd674b740b7e4d7818a9c1d139f861b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:33,685 INFO [StoreOpener-bd674b740b7e4d7818a9c1d139f861b9-1 {}] regionserver.HStore(327): Store=bd674b740b7e4d7818a9c1d139f861b9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:24:33,685 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:33,686 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:33,686 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:33,686 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:33,686 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:33,687 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:24:33,688 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1114): Opened 420a7d1da52b47aef21498fe0f4727b7; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68022411, jitterRate=0.013612911105155945}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:24:33,688 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:33,688 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:33,689 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1006): Region open journal for 420a7d1da52b47aef21498fe0f4727b7: Running coprocessor pre-open hook at 1732163073671Writing region info on filesystem at 1732163073671Initializing all the Stores at 1732163073675 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163073675Cleaning up temporary data from old regions at 1732163073679 (+4 ms)Running coprocessor post-open hooks at 1732163073688 (+9 ms)Region opened successfully at 1732163073689 (+1 ms) 2024-11-21T04:24:33,690 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7., pid=134, masterSystemTime=1732163073666 2024-11-21T04:24:33,691 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:24:33,692 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:33,692 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 
2024-11-21T04:24:33,692 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened bd674b740b7e4d7818a9c1d139f861b9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66700857, jitterRate=-0.006079778075218201}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:24:33,692 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:33,692 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for bd674b740b7e4d7818a9c1d139f861b9: Running coprocessor pre-open hook at 1732163073671Writing region info on filesystem at 1732163073671Initializing all the Stores at 1732163073675 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163073675Cleaning up temporary data from old regions at 1732163073686 (+11 ms)Running coprocessor post-open hooks at 1732163073692 (+6 ms)Region opened successfully at 1732163073692 2024-11-21T04:24:33,693 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9., pid=135, masterSystemTime=1732163073667 2024-11-21T04:24:33,693 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=420a7d1da52b47aef21498fe0f4727b7, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:33,696 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 2024-11-21T04:24:33,696 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 
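Both regions are now open (420a7d1da52b47aef21498fe0f4727b7 on fdc0a393186c,42393 and bd674b740b7e4d7818a9c1d139f861b9 on fdc0a393186c,34363). A client could confirm that placement with a RegionLocator; the helper below is a sketch only, and the Connection it takes is assumed to be already configured rather than coming from this log.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    final class RegionLocationCheck {
      // Lists each region of the test table and the server currently hosting it.
      static void printRegionLocations(Connection conn) throws IOException {
        try (RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
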
2024-11-21T04:24:33,696 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=bd674b740b7e4d7818a9c1d139f861b9, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:33,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 420a7d1da52b47aef21498fe0f4727b7, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:24:33,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure bd674b740b7e4d7818a9c1d139f861b9, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:24:33,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=133 2024-11-21T04:24:33,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 420a7d1da52b47aef21498fe0f4727b7, server=fdc0a393186c,42393,1732162938268 in 184 msec 2024-11-21T04:24:33,705 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=132 2024-11-21T04:24:33,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=420a7d1da52b47aef21498fe0f4727b7, ASSIGN in 343 msec 2024-11-21T04:24:33,705 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=132, state=SUCCESS, hasLock=false; OpenRegionProcedure bd674b740b7e4d7818a9c1d139f861b9, server=fdc0a393186c,34363,1732162938185 in 187 msec 2024-11-21T04:24:33,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=132, resume processing ppid=131 2024-11-21T04:24:33,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bd674b740b7e4d7818a9c1d139f861b9, ASSIGN in 345 msec 2024-11-21T04:24:33,708 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:24:33,708 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163073708"}]},"ts":"1732163073708"} 2024-11-21T04:24:33,711 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-21T04:24:33,712 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:24:33,712 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-21T04:24:33,717 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], 
kv [jenkins: RWXCA] 2024-11-21T04:24:33,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:33,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:33,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:33,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:33,773 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:33,773 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:33,773 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:33,773 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:33,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 858 msec 2024-11-21T04:24:34,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-21T04:24:34,052 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-21T04:24:34,052 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-21T04:24:34,052 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:34,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 
2024-11-21T04:24:34,057 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:34,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-11-21T04:24:34,057 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T04:24:34,060 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-21T04:24:34,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163074060 (current time:1732163074060). 2024-11-21T04:24:34,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:24:34,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-21T04:24:34,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:24:34,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8b32677, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:34,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:34,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:34,062 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:34,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:34,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:34,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@404c692d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:34,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:34,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:34,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,064 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48328, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:34,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31eb846, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:34,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:34,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:34,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:34,067 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41316, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:34,068 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 
2024-11-21T04:24:34,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:34,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,068 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:24:34,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@498e1b0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:34,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:34,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:34,070 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:34,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:34,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:34,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c1efdd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:34,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:34,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:34,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,071 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:34,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e29db5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:34,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:34,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:34,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:34,075 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41324, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:34,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:34,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:34,078 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33318, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:34,079 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 
2024-11-21T04:24:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,079 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:24:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-21T04:24:34,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
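The snapshot request above ({ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }) corresponds to an Admin.snapshot call of roughly the following shape; this is a sketch against the public client API, not the actual test code, and the Connection is again assumed to exist.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.SnapshotType;

    final class TakeEmptySnapshot {
      // Issues a FLUSH-type snapshot and blocks until the master-side SnapshotProcedure completes.
      static void takeSnapshot(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
        }
      }
    }

The master-side execution of that request (pid=136) is logged in the records that follow.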
2024-11-21T04:24:34,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-21T04:24:34,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-21T04:24:34,082 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:24:34,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-21T04:24:34,083 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:24:34,085 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:24:34,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742147_1323 (size=170) 2024-11-21T04:24:34,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742147_1323 (size=170) 2024-11-21T04:24:34,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742147_1323 (size=170) 2024-11-21T04:24:34,092 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:24:34,092 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd674b740b7e4d7818a9c1d139f861b9}, {pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 420a7d1da52b47aef21498fe0f4727b7}] 2024-11-21T04:24:34,093 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:34,093 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:34,192 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-21T04:24:34,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-21T04:24:34,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=137 2024-11-21T04:24:34,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 2024-11-21T04:24:34,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.HRegion(2603): Flush status journal for bd674b740b7e4d7818a9c1d139f861b9: 2024-11-21T04:24:34,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-21T04:24:34,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:34,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:24:34,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:34,247 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 420a7d1da52b47aef21498fe0f4727b7: 2024-11-21T04:24:34,247 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-21T04:24:34,247 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,247 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:34,247 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:24:34,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742148_1324 (size=71) 2024-11-21T04:24:34,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742149_1325 (size=71) 2024-11-21T04:24:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742148_1324 (size=71) 2024-11-21T04:24:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742148_1324 (size=71) 2024-11-21T04:24:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742149_1325 (size=71) 2024-11-21T04:24:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742149_1325 (size=71) 2024-11-21T04:24:34,256 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:34,256 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-21T04:24:34,257 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 
2024-11-21T04:24:34,257 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-21T04:24:34,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-21T04:24:34,257 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:34,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=137 2024-11-21T04:24:34,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:34,257 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:34,257 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:34,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 420a7d1da52b47aef21498fe0f4727b7 in 166 msec 2024-11-21T04:24:34,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=137, resume processing ppid=136 2024-11-21T04:24:34,260 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:24:34,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bd674b740b7e4d7818a9c1d139f861b9 in 166 msec 2024-11-21T04:24:34,261 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:24:34,261 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:24:34,261 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,262 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742150_1326 (size=552) 2024-11-21T04:24:34,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742150_1326 (size=552) 2024-11-21T04:24:34,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742150_1326 (size=552) 2024-11-21T04:24:34,284 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:24:34,289 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:24:34,290 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,291 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:24:34,292 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-21T04:24:34,293 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 212 msec 2024-11-21T04:24:34,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-21T04:24:34,402 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-21T04:24:34,406 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='097409d6fec939f966a6b0b5c8d13084e', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:24:34,406 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='1dab6cbd0e5db92231b4307376a2b0b64', locateType=CURRENT is 
[region=testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:34,407 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='20a20df986ec49d8b2e76ab63459aa925', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:34,408 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='3a01ecfc4e9e556a27cd5caaa72c88def', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:34,408 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='4c49942acd2e056b513e556b59ca33c10', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:34,411 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:24:34,413 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42393 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:24:34,414 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T04:24:34,417 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-21T04:24:34,417 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 
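The two HRegion(8528) entries above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash") are what a region server logs when a client writes with durability turned off. A minimal client-side sketch of such a write, assuming a reachable cluster with default configuration and the column family 'cf' from these logs; the class name and row key are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-0"));  // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skip the write-ahead log: faster, but the edit is lost if the region server
          // crashes before the memstore is flushed -- the condition the log line warns about.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }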
2024-11-21T04:24:34,417 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:34,419 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T04:24:34,423 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T04:24:34,428 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T04:24:34,430 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-21T04:24:34,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163074430 (current time:1732163074430). 2024-11-21T04:24:34,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:24:34,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-21T04:24:34,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:24:34,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41856f06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:34,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:34,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:34,432 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:34,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:34,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:34,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47a57ffd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-21T04:24:34,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:34,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:34,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,433 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48364, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:34,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@309603f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:34,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:34,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:34,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:34,436 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41338, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:34,437 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
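The MasterRpcServices(1763) entry just above records the master receiving a snapshot request for snaptb0-testExportExpiredSnapshot with type=FLUSH and ttl=0, after which SnapshotDescriptionUtils fills in the creation time, default TTL, manifest VERSION 2 and the owner. On the client side this corresponds to a call to Admin.snapshot; a minimal sketch, assuming default configuration and the table name from these logs (the class name is a placeholder):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class FlushSnapshotExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot: memstores are flushed first, then the resulting hfiles
          // are referenced in the snapshot manifest (see the SnapshotRegionCallable entries).
          admin.snapshot("snaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
        }
      }
    }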
2024-11-21T04:24:34,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:34,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,437 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:24:34,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54d79cb1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:34,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:34,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:34,439 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:34,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:34,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:34,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30e36cb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:34,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:34,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:34,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,440 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48372, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:34,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@264588bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:34,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:34,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:34,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:34,443 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41342, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:34,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:34,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:34,446 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33332, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:34,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
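The AsyncNonMetaRegionLocator(310) entries above (for the test table rows at 04:24:34,406-34,408 and for 'hbase:acl' at 04:24:34,445) show the client resolving which region, and which region server, hosts a given row before issuing the call. The same lookup can be made explicitly through RegionLocator; a minimal sketch, with a placeholder row key and class name:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLocationExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          // Resolves the region containing the row and the server currently hosting it,
          // the same information the locator logs as "The fetched location of ...".
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("1dab6cbd"));  // hypothetical row
          System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
        }
      }
    }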
2024-11-21T04:24:34,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:34,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:34,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-21T04:24:34,448 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:24:34,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
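PermissionStorage(613) above shows the master reading the ACL entry for the table from hbase:acl ("kv [jenkins: RWXCA]") so the table permissions can be copied into the snapshot description (the writeAclToSnapshotDescription frames in the call stack). Such an entry is normally produced by granting table permissions; a hedged sketch using AccessControlClient, assuming the AccessController coprocessor is enabled as it is in this test cluster (class name is a placeholder):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantExample {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Grants read/write/exec/create/admin on the whole table to user "jenkins",
          // which is what later shows up in hbase:acl as the entry "[jenkins: RWXCA]".
          AccessControlClient.grant(conn, TableName.valueOf("testtb-testExportExpiredSnapshot"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }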
2024-11-21T04:24:34,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-21T04:24:34,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-21T04:24:34,450 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:24:34,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-21T04:24:34,451 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:24:34,453 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:24:34,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742151_1327 (size=165) 2024-11-21T04:24:34,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742151_1327 (size=165) 2024-11-21T04:24:34,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742151_1327 (size=165) 2024-11-21T04:24:34,461 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:24:34,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd674b740b7e4d7818a9c1d139f861b9}, {pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 420a7d1da52b47aef21498fe0f4727b7}] 2024-11-21T04:24:34,462 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:34,462 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:34,563 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-21T04:24:34,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-21T04:24:34,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=140 2024-11-21T04:24:34,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 2024-11-21T04:24:34,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:34,614 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2902): Flushing bd674b740b7e4d7818a9c1d139f861b9 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-21T04:24:34,614 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 420a7d1da52b47aef21498fe0f4727b7 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-21T04:24:34,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/.tmp/cf/a86eeb2338e040cb9ea5e6620d2af785 is 71, key is 20bb26a53f171a02d93c9b934a9b3d9b/cf:q/1732163074413/Put/seqid=0 2024-11-21T04:24:34,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/.tmp/cf/16e884df3e7646838c95f2695ac528f7 is 69, key is 097409d6fec939f966a6b0b5c8d13084e/cf:q/1732163074411/Put/seqid=0 2024-11-21T04:24:34,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742152_1328 (size=5149) 2024-11-21T04:24:34,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742152_1328 (size=5149) 2024-11-21T04:24:34,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742152_1328 (size=5149) 2024-11-21T04:24:34,667 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/.tmp/cf/16e884df3e7646838c95f2695ac528f7 2024-11-21T04:24:34,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/.tmp/cf/16e884df3e7646838c95f2695ac528f7 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/cf/16e884df3e7646838c95f2695ac528f7 2024-11-21T04:24:34,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742153_1329 (size=8462) 2024-11-21T04:24:34,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742153_1329 (size=8462) 2024-11-21T04:24:34,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742153_1329 (size=8462) 2024-11-21T04:24:34,681 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/.tmp/cf/a86eeb2338e040cb9ea5e6620d2af785 2024-11-21T04:24:34,682 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/cf/16e884df3e7646838c95f2695ac528f7, entries=1, sequenceid=6, filesize=5.0 K 2024-11-21T04:24:34,683 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for bd674b740b7e4d7818a9c1d139f861b9 in 68ms, sequenceid=6, compaction requested=false 2024-11-21T04:24:34,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-21T04:24:34,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2603): Flush status journal for bd674b740b7e4d7818a9c1d139f861b9: 2024-11-21T04:24:34,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. for snaptb0-testExportExpiredSnapshot completed. 2024-11-21T04:24:34,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:34,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/cf/16e884df3e7646838c95f2695ac528f7] hfiles 2024-11-21T04:24:34,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/cf/16e884df3e7646838c95f2695ac528f7 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/.tmp/cf/a86eeb2338e040cb9ea5e6620d2af785 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/cf/a86eeb2338e040cb9ea5e6620d2af785 2024-11-21T04:24:34,695 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/cf/a86eeb2338e040cb9ea5e6620d2af785, entries=49, sequenceid=6, filesize=8.3 K 2024-11-21T04:24:34,697 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 420a7d1da52b47aef21498fe0f4727b7 in 82ms, sequenceid=6, compaction requested=false 2024-11-21T04:24:34,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 420a7d1da52b47aef21498fe0f4727b7: 2024-11-21T04:24:34,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. for snaptb0-testExportExpiredSnapshot completed. 2024-11-21T04:24:34,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:34,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/cf/a86eeb2338e040cb9ea5e6620d2af785] hfiles 2024-11-21T04:24:34,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/cf/a86eeb2338e040cb9ea5e6620d2af785 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742154_1330 (size=110) 2024-11-21T04:24:34,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742154_1330 (size=110) 2024-11-21T04:24:34,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742154_1330 (size=110) 2024-11-21T04:24:34,699 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 
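Unlike the earlier emptySnaptb0 pass, this snapshot finds data in the memstores, so each SnapshotRegionCallable first flushes its region (the HRegion(2902) and DefaultStoreFlusher(81) entries above) and only then adds the resulting hfiles to the manifest. The flush that the FLUSH snapshot type performs implicitly can also be requested on its own through the Admin API; a minimal sketch, assuming default configuration (class name is a placeholder):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Forces memstore contents out to hfiles for every region of the table,
          // comparable to what the snapshot does per region before recording references.
          admin.flush(TableName.valueOf("testtb-testExportExpiredSnapshot"));
        }
      }
    }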
2024-11-21T04:24:34,699 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=140 2024-11-21T04:24:34,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=140 2024-11-21T04:24:34,699 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:34,700 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:34,702 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bd674b740b7e4d7818a9c1d139f861b9 in 240 msec 2024-11-21T04:24:34,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742155_1331 (size=110) 2024-11-21T04:24:34,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742155_1331 (size=110) 2024-11-21T04:24:34,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742155_1331 (size=110) 2024-11-21T04:24:34,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 
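These per-region callables report back to the master-side SnapshotProcedure, which then consolidates and verifies the manifest and publishes the snapshot (the entries that follow). From a client, the completed snapshots can be listed to confirm the result; a minimal sketch (class name is a placeholder):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          for (SnapshotDescription sd : admin.listSnapshots()) {
            // Each completed snapshot reports its name, source table and type, e.g.
            // snaptb0-testExportExpiredSnapshot on testtb-testExportExpiredSnapshot, FLUSH.
            System.out.println(sd.getName() + " table=" + sd.getTableName() + " type=" + sd.getType());
          }
        }
      }
    }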
2024-11-21T04:24:34,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-21T04:24:34,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-21T04:24:34,715 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:34,716 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:34,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=139 2024-11-21T04:24:34,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 420a7d1da52b47aef21498fe0f4727b7 in 255 msec 2024-11-21T04:24:34,718 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:24:34,719 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:24:34,719 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:24:34,719 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,720 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742156_1332 (size=630) 2024-11-21T04:24:34,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742156_1332 (size=630) 2024-11-21T04:24:34,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742156_1332 (size=630) 2024-11-21T04:24:34,730 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:24:34,734 INFO 
[PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:24:34,734 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-21T04:24:34,736 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:24:34,736 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-21T04:24:34,737 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 287 msec 2024-11-21T04:24:34,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-21T04:24:34,773 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-21T04:24:34,774 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:24:34,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-21T04:24:34,775 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:24:34,775 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:34,775 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 142 2024-11-21T04:24:34,776 INFO [PEWorker-5 
{}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:24:34,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-21T04:24:34,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742157_1333 (size=400) 2024-11-21T04:24:34,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742157_1333 (size=400) 2024-11-21T04:24:34,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742157_1333 (size=400) 2024-11-21T04:24:34,785 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => fef81f9296f7039a4489a2709be189d3, NAME => 'testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:34,786 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => fa291fa3994bf7902442d1d129cd311c, NAME => 'testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:34,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742158_1334 (size=61) 2024-11-21T04:24:34,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742158_1334 (size=61) 2024-11-21T04:24:34,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742159_1335 (size=61) 2024-11-21T04:24:34,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742159_1335 (size=61) 2024-11-21T04:24:34,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742158_1334 (size=61) 2024-11-21T04:24:34,792 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] 
regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:34,792 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing fef81f9296f7039a4489a2709be189d3, disabling compactions & flushes 2024-11-21T04:24:34,792 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:24:34,793 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:24:34,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742159_1335 (size=61) 2024-11-21T04:24:34,793 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. after waiting 0 ms 2024-11-21T04:24:34,793 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:24:34,793 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:24:34,793 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for fef81f9296f7039a4489a2709be189d3: Waiting for close lock at 1732163074792Disabling compacts and flushes for region at 1732163074792Disabling writes for close at 1732163074793 (+1 ms)Writing region close event to WAL at 1732163074793Closed at 1732163074793 2024-11-21T04:24:34,793 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:34,793 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing fa291fa3994bf7902442d1d129cd311c, disabling compactions & flushes 2024-11-21T04:24:34,793 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 2024-11-21T04:24:34,793 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 2024-11-21T04:24:34,793 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. after waiting 0 ms 2024-11-21T04:24:34,793 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 
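The HMaster$4(2454) entry at 04:24:34,774 records the create-table request for 'testExportExpiredSnapshot' with a single family 'cf' (printed in shell descriptor syntax), and the RegionOpenAndInit workers above initialize the two regions split at key '1'. A client-side sketch of an equivalent request using the descriptor builders, assuming default configuration; attributes not set explicitly fall back to the defaults shown in the log, and the class name is a placeholder:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)      // VERSIONS => '1' in the logged descriptor
                  .setBlocksize(65536)    // BLOCKSIZE => '65536 B (64KB)'
                  .build())
              .build();
          // One split key gives the two regions seen in the log: [,1) and [1,).
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(td, splitKeys);  // returns once the CreateTableProcedure finishes
        }
      }
    }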
2024-11-21T04:24:34,793 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 2024-11-21T04:24:34,793 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for fa291fa3994bf7902442d1d129cd311c: Waiting for close lock at 1732163074793Disabling compacts and flushes for region at 1732163074793Disabling writes for close at 1732163074793Writing region close event to WAL at 1732163074793Closed at 1732163074793 2024-11-21T04:24:34,794 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:24:34,795 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732163074794"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163074794"}]},"ts":"1732163074794"} 2024-11-21T04:24:34,795 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732163074794"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163074794"}]},"ts":"1732163074794"} 2024-11-21T04:24:34,797 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T04:24:34,798 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:24:34,798 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163074798"}]},"ts":"1732163074798"} 2024-11-21T04:24:34,799 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-21T04:24:34,799 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:24:34,800 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:24:34,800 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:24:34,800 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:24:34,801 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:24:34,801 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:24:34,801 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:24:34,801 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:24:34,801 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:24:34,801 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:24:34,801 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:24:34,801 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=fef81f9296f7039a4489a2709be189d3, ASSIGN}, {pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=fa291fa3994bf7902442d1d129cd311c, ASSIGN}] 2024-11-21T04:24:34,802 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=fa291fa3994bf7902442d1d129cd311c, ASSIGN 2024-11-21T04:24:34,802 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=fef81f9296f7039a4489a2709be189d3, ASSIGN 2024-11-21T04:24:34,803 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=fa291fa3994bf7902442d1d129cd311c, ASSIGN; state=OFFLINE, location=fdc0a393186c,42393,1732162938268; forceNewPlan=false, retain=false 2024-11-21T04:24:34,803 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=fef81f9296f7039a4489a2709be189d3, ASSIGN; state=OFFLINE, location=fdc0a393186c,39347,1732162938035; forceNewPlan=false, retain=false 2024-11-21T04:24:34,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-21T04:24:34,953 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-21T04:24:34,953 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=fa291fa3994bf7902442d1d129cd311c, regionState=OPENING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:34,953 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=fef81f9296f7039a4489a2709be189d3, regionState=OPENING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:24:34,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=fa291fa3994bf7902442d1d129cd311c, ASSIGN because future has completed 2024-11-21T04:24:34,957 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure fa291fa3994bf7902442d1d129cd311c, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:24:34,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=fef81f9296f7039a4489a2709be189d3, ASSIGN because future has completed 2024-11-21T04:24:34,958 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure fef81f9296f7039a4489a2709be189d3, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:24:35,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-21T04:24:35,114 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 2024-11-21T04:24:35,114 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7752): Opening region: {ENCODED => fa291fa3994bf7902442d1d129cd311c, NAME => 'testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T04:24:35,115 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. service=AccessControlService 2024-11-21T04:24:35,115 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T04:24:35,115 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,115 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:35,115 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:24:35,116 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7794): checking encryption for fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,116 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7797): checking classloading for fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,116 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => fef81f9296f7039a4489a2709be189d3, NAME => 'testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T04:24:35,116 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. service=AccessControlService 2024-11-21T04:24:35,116 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
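Each region open above also registers AccessControlService and loads org.apache.hadoop.hbase.security.access.AccessController as a system coprocessor, so this mini-cluster runs with authorization enabled (which is why ACL reads and /hbase/acl ZooKeeper events appear later). A sketch of configuration that would typically produce this, assuming the standard coprocessor keys; the exact settings used by the test harness are not visible in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SecuredClusterConf {
      // Hypothetical helper: wires the AccessController in at every level,
      // which is what produces the "Registered coprocessor service ...
      // AccessControlService" entries above.
      public static Configuration secured() {
        Configuration conf = HBaseConfiguration.create();
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.set("hbase.coprocessor.master.classes", ac);
        conf.set("hbase.coprocessor.region.classes", ac);
        conf.set("hbase.coprocessor.regionserver.classes", ac);
        conf.setBoolean("hbase.security.authorization", true);
        return conf;
      }
    }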
2024-11-21T04:24:35,117 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,117 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:35,117 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,117 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,118 INFO [StoreOpener-fa291fa3994bf7902442d1d129cd311c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,119 INFO [StoreOpener-fef81f9296f7039a4489a2709be189d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,121 INFO [StoreOpener-fa291fa3994bf7902442d1d129cd311c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fa291fa3994bf7902442d1d129cd311c columnFamilyName cf 2024-11-21T04:24:35,121 DEBUG [StoreOpener-fa291fa3994bf7902442d1d129cd311c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:35,121 INFO [StoreOpener-fef81f9296f7039a4489a2709be189d3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
fef81f9296f7039a4489a2709be189d3 columnFamilyName cf 2024-11-21T04:24:35,121 DEBUG [StoreOpener-fef81f9296f7039a4489a2709be189d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:35,121 INFO [StoreOpener-fa291fa3994bf7902442d1d129cd311c-1 {}] regionserver.HStore(327): Store=fa291fa3994bf7902442d1d129cd311c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:24:35,122 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1038): replaying wal for fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,122 INFO [StoreOpener-fef81f9296f7039a4489a2709be189d3-1 {}] regionserver.HStore(327): Store=fef81f9296f7039a4489a2709be189d3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:24:35,122 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,123 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,123 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,123 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,123 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,123 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,123 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,123 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1048): stopping wal replay for fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,124 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1060): Cleaning up temporary data for fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,125 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] 
regionserver.HRegion(1093): writing seq id for fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,125 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1093): writing seq id for fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,127 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fef81f9296f7039a4489a2709be189d3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:24:35,127 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fa291fa3994bf7902442d1d129cd311c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:24:35,127 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened fef81f9296f7039a4489a2709be189d3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70582086, jitterRate=0.051755040884017944}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:24:35,128 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,127 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1114): Opened fa291fa3994bf7902442d1d129cd311c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67038844, jitterRate=-0.0010433793067932129}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:24:35,128 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,128 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for fef81f9296f7039a4489a2709be189d3: Running coprocessor pre-open hook at 1732163075117Writing region info on filesystem at 1732163075117Initializing all the Stores at 1732163075118 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163075119 (+1 ms)Cleaning up temporary data from old regions at 1732163075123 (+4 ms)Running coprocessor post-open hooks at 1732163075128 (+5 ms)Region opened successfully at 1732163075128 2024-11-21T04:24:35,128 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1006): Region open journal for fa291fa3994bf7902442d1d129cd311c: Running coprocessor pre-open hook at 1732163075116Writing region info on filesystem at 1732163075116Initializing all the Stores at 1732163075117 (+1 
ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163075118 (+1 ms)Cleaning up temporary data from old regions at 1732163075124 (+6 ms)Running coprocessor post-open hooks at 1732163075128 (+4 ms)Region opened successfully at 1732163075128 2024-11-21T04:24:35,129 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3., pid=146, masterSystemTime=1732163075111 2024-11-21T04:24:35,131 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c., pid=145, masterSystemTime=1732163075109 2024-11-21T04:24:35,132 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:24:35,132 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:24:35,132 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=fef81f9296f7039a4489a2709be189d3, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:24:35,133 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 2024-11-21T04:24:35,133 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 
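In the two "Opened ...; next sequenceid=2" entries above, the SteppingSplitPolicy wraps a ConstantSizeRegionSplitPolicy whose desiredMaxFileSize already includes the logged jitterRate. The numbers appear consistent with a 64 MB (67,108,864-byte) base size adjusted as desiredMaxFileSize = base + (long)(base * jitterRate); the base value is inferred from the arithmetic, not read from the test configuration. A small check:

    public class SplitSizeJitterCheck {
      public static void main(String[] args) {
        long base = 67108864L; // 64 MB, inferred; not taken from the test's hbase-site
        double[] jitterRates = { 0.051755040884017944, -0.0010433793067932129 };
        for (double rate : jitterRates) {
          // Should print 70582086 and 67038844, matching the two regions above.
          System.out.println(base + (long) (base * rate));
        }
      }
    }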
2024-11-21T04:24:35,133 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=fa291fa3994bf7902442d1d129cd311c, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:35,134 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure fef81f9296f7039a4489a2709be189d3, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:24:35,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure fa291fa3994bf7902442d1d129cd311c, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:24:35,137 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=143 2024-11-21T04:24:35,137 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=143, state=SUCCESS, hasLock=false; OpenRegionProcedure fef81f9296f7039a4489a2709be189d3, server=fdc0a393186c,39347,1732162938035 in 177 msec 2024-11-21T04:24:35,138 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=144 2024-11-21T04:24:35,138 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure fa291fa3994bf7902442d1d129cd311c, server=fdc0a393186c,42393,1732162938268 in 180 msec 2024-11-21T04:24:35,138 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=fef81f9296f7039a4489a2709be189d3, ASSIGN in 336 msec 2024-11-21T04:24:35,139 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=142 2024-11-21T04:24:35,139 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=fa291fa3994bf7902442d1d129cd311c, ASSIGN in 337 msec 2024-11-21T04:24:35,140 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:24:35,140 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163075140"}]},"ts":"1732163075140"} 2024-11-21T04:24:35,141 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-21T04:24:35,142 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:24:35,142 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-21T04:24:35,145 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-21T04:24:35,189 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:35,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:35,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:35,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:35,198 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:35,198 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:35,198 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:35,198 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:35,198 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:35,199 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:35,199 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:35,199 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:35,199 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 423 msec 2024-11-21T04:24:35,403 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-21T04:24:35,403 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-21T04:24:35,403 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-21T04:24:35,404 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:35,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-11-21T04:24:35,413 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:35,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 2024-11-21T04:24:35,413 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T04:24:35,419 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='018005d16c70890440e95d13403cf6618', locateType=CURRENT is [region=testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3., hostname=fdc0a393186c,39347,1732162938035, seqNum=2] 2024-11-21T04:24:35,420 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='1910f489fd84c9e952256676cf923d178', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:35,421 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='3bf8599d04051a71c9f379b2e0da829c2', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:35,422 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='243231feb5518fe0861e3aef3a8232d6b', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:35,423 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='496945f06253dcba8742d2f188ec10724', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:35,427 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39347 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. with WAL disabled. Data may be lost in the event of a crash. 
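The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." entries are the regionserver's warning for mutations submitted with durability turned off, which the test presumably uses to load rows quickly before snapshotting. A minimal sketch of a write that triggers the same warning; the row and value contents are illustrative, only the table, family, and qualifier names come from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-0001"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL is what makes the regionserver log the
          // "WAL disabled ... Data may be lost" warning seen above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }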
2024-11-21T04:24:35,430 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42393 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:24:35,432 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T04:24:35,434 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-21T04:24:35,434 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:24:35,434 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:35,436 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T04:24:35,440 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-21T04:24:35,446 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-21T04:24:35,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-21T04:24:35,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:24:35,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ae8c4d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:35,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:35,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:35,448 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:35,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:35,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:35,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@634fd98, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:35,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:35,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:35,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:35,449 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48402, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:35,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8ec9dc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:35,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:35,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:35,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:35,451 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41344, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:35,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
2024-11-21T04:24:35,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:35,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:35,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:35,453 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:24:35,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a1b35b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:35,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:35,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:35,454 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:35,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:35,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:35,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46ebe35e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:35,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:35,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:35,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:35,455 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48420, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:35,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e40848f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:35,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:35,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:35,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:35,458 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41354, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:35,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:35,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:35,461 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33344, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:35,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577. 
2024-11-21T04:24:35,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:35,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:35,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:35,462 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:24:35,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-21T04:24:35,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
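After the ACL checks above succeed, the master accepts the request { ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }: an ordinary flush snapshot with a 10-second TTL, presumably so the export test can later treat it as expired. A hedged sketch of the equivalent client call follows; the plain two-argument Admin.snapshot overload takes a FLUSH snapshot of an enabled table, and the ttl property seen in the log is set separately by the test (for example as a snapshot property such as {TTL => 10} in the shell) and is not reproduced here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // For an enabled table this takes a FLUSH-type snapshot, matching
          // "type=FLUSH" in the request above; the ttl=10 property is applied
          // by the test itself and is omitted here.
          admin.snapshot("snapshot-testExportExpiredSnapshot",
              TableName.valueOf("testExportExpiredSnapshot"));
        }
      }
    }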
2024-11-21T04:24:35,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-21T04:24:35,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-21T04:24:35,465 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:24:35,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-21T04:24:35,466 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:24:35,468 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:24:35,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742160_1336 (size=152) 2024-11-21T04:24:35,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742160_1336 (size=152) 2024-11-21T04:24:35,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742160_1336 (size=152) 2024-11-21T04:24:35,474 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:24:35,474 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fef81f9296f7039a4489a2709be189d3}, {pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fa291fa3994bf7902442d1d129cd311c}] 2024-11-21T04:24:35,475 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:35,475 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:35,572 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-21T04:24:35,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=149 2024-11-21T04:24:35,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=148 2024-11-21T04:24:35,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 2024-11-21T04:24:35,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:24:35,629 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2902): Flushing fef81f9296f7039a4489a2709be189d3 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-21T04:24:35,629 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2902): Flushing fa291fa3994bf7902442d1d129cd311c 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-21T04:24:35,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fa291fa3994bf7902442d1d129cd311c/.tmp/cf/b32d4aeeb33943b7a76769c5b8a560b0 is 71, key is 21cde7034e8821f1fdc4fac111e2d04a/cf:q/1732163075430/Put/seqid=0 2024-11-21T04:24:35,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fef81f9296f7039a4489a2709be189d3/.tmp/cf/6f826a7babca4077be931a97105ef60e is 71, key is 097875896e2dade6d98b40245afe7dd0/cf:q/1732163075427/Put/seqid=0 2024-11-21T04:24:35,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742162_1338 (size=5356) 2024-11-21T04:24:35,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742161_1337 (size=8258) 2024-11-21T04:24:35,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742162_1338 (size=5356) 2024-11-21T04:24:35,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742161_1337 (size=8258) 2024-11-21T04:24:35,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742161_1337 (size=8258) 2024-11-21T04:24:35,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33643 is added to blk_1073742162_1338 (size=5356) 2024-11-21T04:24:35,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-21T04:24:36,056 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fef81f9296f7039a4489a2709be189d3/.tmp/cf/6f826a7babca4077be931a97105ef60e 2024-11-21T04:24:36,056 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fa291fa3994bf7902442d1d129cd311c/.tmp/cf/b32d4aeeb33943b7a76769c5b8a560b0 2024-11-21T04:24:36,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fa291fa3994bf7902442d1d129cd311c/.tmp/cf/b32d4aeeb33943b7a76769c5b8a560b0 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fa291fa3994bf7902442d1d129cd311c/cf/b32d4aeeb33943b7a76769c5b8a560b0 2024-11-21T04:24:36,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fef81f9296f7039a4489a2709be189d3/.tmp/cf/6f826a7babca4077be931a97105ef60e as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fef81f9296f7039a4489a2709be189d3/cf/6f826a7babca4077be931a97105ef60e 2024-11-21T04:24:36,070 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fa291fa3994bf7902442d1d129cd311c/cf/b32d4aeeb33943b7a76769c5b8a560b0, entries=46, sequenceid=5, filesize=8.1 K 2024-11-21T04:24:36,070 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fef81f9296f7039a4489a2709be189d3/cf/6f826a7babca4077be931a97105ef60e, entries=4, sequenceid=5, filesize=5.2 K 2024-11-21T04:24:36,071 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for fef81f9296f7039a4489a2709be189d3 in 442ms, sequenceid=5, compaction requested=false 2024-11-21T04:24:36,071 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(3140): Finished flush of 
dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for fa291fa3994bf7902442d1d129cd311c in 442ms, sequenceid=5, compaction requested=false 2024-11-21T04:24:36,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-21T04:24:36,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-21T04:24:36,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2603): Flush status journal for fef81f9296f7039a4489a2709be189d3: 2024-11-21T04:24:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2603): Flush status journal for fa291fa3994bf7902442d1d129cd311c: 2024-11-21T04:24:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. for snapshot-testExportExpiredSnapshot completed. 2024-11-21T04:24:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. for snapshot-testExportExpiredSnapshot completed. 2024-11-21T04:24:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-21T04:24:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-21T04:24:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fef81f9296f7039a4489a2709be189d3/cf/6f826a7babca4077be931a97105ef60e] hfiles 2024-11-21T04:24:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fef81f9296f7039a4489a2709be189d3/cf/6f826a7babca4077be931a97105ef60e for snapshot=snapshot-testExportExpiredSnapshot 2024-11-21T04:24:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fa291fa3994bf7902442d1d129cd311c/cf/b32d4aeeb33943b7a76769c5b8a560b0] hfiles 2024-11-21T04:24:36,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fa291fa3994bf7902442d1d129cd311c/cf/b32d4aeeb33943b7a76769c5b8a560b0 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-21T04:24:36,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742164_1340 (size=103) 2024-11-21T04:24:36,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742164_1340 (size=103) 2024-11-21T04:24:36,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742164_1340 (size=103) 2024-11-21T04:24:36,082 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 
2024-11-21T04:24:36,082 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=148 2024-11-21T04:24:36,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=148 2024-11-21T04:24:36,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:36,083 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fef81f9296f7039a4489a2709be189d3 2024-11-21T04:24:36,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fef81f9296f7039a4489a2709be189d3 in 610 msec 2024-11-21T04:24:36,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742163_1339 (size=103) 2024-11-21T04:24:36,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742163_1339 (size=103) 2024-11-21T04:24:36,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742163_1339 (size=103) 2024-11-21T04:24:36,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 
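The DefaultStoreFlusher and HRegion entries above record each region flushing its memstore before its store files are referenced by the snapshot; a FLUSH-type snapshot performs this flush as part of the per-region snapshot task. For context, a minimal client-side sketch of an explicit flush of the same table (table name taken from the log; the connection setup is generic and not part of the test code shown here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask every region of the table to flush its memstore to hfiles --
      // the same kind of flush the RS_SNAPSHOT_OPERATIONS handlers run above.
      admin.flush(TableName.valueOf("testExportExpiredSnapshot"));
    }
  }
}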
2024-11-21T04:24:36,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-21T04:24:36,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-21T04:24:36,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=149 2024-11-21T04:24:36,092 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:36,092 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:24:36,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=147 2024-11-21T04:24:36,107 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:24:36,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fa291fa3994bf7902442d1d129cd311c in 631 msec 2024-11-21T04:24:36,108 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:24:36,108 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:24:36,108 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-21T04:24:36,109 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-21T04:24:36,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742165_1341 (size=609) 2024-11-21T04:24:36,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742165_1341 (size=609) 2024-11-21T04:24:36,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742165_1341 (size=609) 2024-11-21T04:24:36,119 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ 
ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:24:36,123 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:24:36,123 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-21T04:24:36,124 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:24:36,125 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-21T04:24:36,126 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 662 msec 2024-11-21T04:24:36,297 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0006_000001 (auth:SIMPLE) from 127.0.0.1:59202 2024-11-21T04:24:36,309 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_0/usercache/jenkins/appcache/application_1732162945431_0006/container_1732162945431_0006_01_000001/launch_container.sh] 2024-11-21T04:24:36,309 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_0/usercache/jenkins/appcache/application_1732162945431_0006/container_1732162945431_0006_01_000001/container_tokens] 2024-11-21T04:24:36,309 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_0/usercache/jenkins/appcache/application_1732162945431_0006/container_1732162945431_0006_01_000001/sysfs] 2024-11-21T04:24:36,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-21T04:24:36,603 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: 
default:testExportExpiredSnapshot completed 2024-11-21T04:24:37,775 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:24:37,803 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-21T04:24:37,803 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-21T04:24:37,803 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-21T04:24:37,803 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-21T04:24:37,804 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-21T04:24:37,804 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-21T04:24:43,306 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:24:46,350 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
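The SnapshotProcedure entries above print the snapshot descriptor as { ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, i.e. a flush snapshot created with a 10-second TTL. A sketch of how a client can request such a snapshot; the exact SnapshotDescription constructor arguments and the "TTL" property key are assumptions based on the public client API and should be checked against the HBase version under test:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotWithTtlSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Snapshot properties; "TTL" (seconds) is assumed to be the key behind
      // the "ttl=10" shown in the procedure state above.
      Map<String, Object> props = new HashMap<>();
      props.put("TTL", 10L);
      // FLUSH-type snapshot: regions flush their memstores first, then the
      // resulting hfiles are referenced in the snapshot manifest.
      admin.snapshot(new SnapshotDescription(
          "snapshot-testExportExpiredSnapshot",
          TableName.valueOf("testExportExpiredSnapshot"),
          SnapshotType.FLUSH,
          null,   // owner
          -1,     // creation time (assumed: filled in by the master)
          -1,     // manifest version (assumed: use the default)
          props));
    }
  }
}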
2024-11-21T04:24:46,613 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163086613 2024-11-21T04:24:46,613 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44067, tgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163086613, rawTgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163086613, srcFsUri=hdfs://localhost:44067, srcDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:46,654 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44067, inputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:46,654 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163086613, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163086613/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-21T04:24:46,658 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T04:24:46,660 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:951) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1096) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:314) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
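The ERROR above is the expected outcome of this test: ExportSnapshot.verifySnapshot rejects the export because the snapshot's 10-second TTL has already elapsed, and AbstractHBaseTool logs the SnapshotTTLExpiredException raised on the ToolRunner invocation path shown in the stack trace. A sketch of the same invocation path; the destination URI is a placeholder, and -snapshot/-copy-to are the commonly documented ExportSnapshot options:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportExpiredSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same entry point as the stack trace above: ToolRunner.run() ->
    // AbstractHBaseTool.run() -> ExportSnapshot.doWork() -> verifySnapshot().
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportExpiredSnapshot",
        "-copy-to", "hdfs://namenode:8020/hbase-exports/export-test" // placeholder target
    });
    // Once the TTL has expired, verifySnapshot fails before any copy job is
    // launched and the tool reports the error instead of exporting.
    System.out.println("ExportSnapshot exit code: " + rc);
  }
}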
2024-11-21T04:24:46,661 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-11-21T04:24:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T04:24:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-21T04:24:46,665 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163086665"}]},"ts":"1732163086665"} 2024-11-21T04:24:46,668 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-21T04:24:46,668 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-21T04:24:46,669 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-21T04:24:46,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bd674b740b7e4d7818a9c1d139f861b9, UNASSIGN}, {pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=420a7d1da52b47aef21498fe0f4727b7, UNASSIGN}] 2024-11-21T04:24:46,671 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=420a7d1da52b47aef21498fe0f4727b7, UNASSIGN 2024-11-21T04:24:46,671 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bd674b740b7e4d7818a9c1d139f861b9, UNASSIGN 2024-11-21T04:24:46,675 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=bd674b740b7e4d7818a9c1d139f861b9, regionState=CLOSING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:24:46,675 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=420a7d1da52b47aef21498fe0f4727b7, regionState=CLOSING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:46,749 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=fdc0a393186c,42393,1732162938268, table=testtb-testExportExpiredSnapshot, region=420a7d1da52b47aef21498fe0f4727b7. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-21T04:24:46,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-21T04:24:46,774 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=fdc0a393186c,34363,1732162938185, table=testtb-testExportExpiredSnapshot, region=bd674b740b7e4d7818a9c1d139f861b9. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-21T04:24:46,781 INFO [AsyncFSWAL-0-hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a-prefix:fdc0a393186c,34363,1732162938185.meta {}] wal.AbstractFSWAL(1368): Slow sync cost: 104 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38043,DS-592a7e82-9870-4bba-b814-b684c730bac4,DISK], DatanodeInfoWithStorage[127.0.0.1:42055,DS-843822a9-dcff-4e93-b597-54fc35d2cf04,DISK], DatanodeInfoWithStorage[127.0.0.1:33643,DS-e1da3505-cf9b-4bb4-8cd9-49cb6837bc8e,DISK]] 2024-11-21T04:24:46,781 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bd674b740b7e4d7818a9c1d139f861b9, UNASSIGN because future has completed 2024-11-21T04:24:46,782 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:24:46,782 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure bd674b740b7e4d7818a9c1d139f861b9, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:24:46,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=420a7d1da52b47aef21498fe0f4727b7, UNASSIGN because future has completed 2024-11-21T04:24:46,786 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:24:46,786 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=155, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 420a7d1da52b47aef21498fe0f4727b7, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:24:46,938 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(122): Close bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:46,938 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:24:46,938 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1722): Closing bd674b740b7e4d7818a9c1d139f861b9, disabling compactions & flushes 2024-11-21T04:24:46,938 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 
2024-11-21T04:24:46,938 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 2024-11-21T04:24:46,938 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. after waiting 0 ms 2024-11-21T04:24:46,938 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 2024-11-21T04:24:46,942 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(122): Close 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:46,942 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:24:46,942 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1722): Closing 420a7d1da52b47aef21498fe0f4727b7, disabling compactions & flushes 2024-11-21T04:24:46,942 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:46,942 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:46,942 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. after waiting 0 ms 2024-11-21T04:24:46,942 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:46,959 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:24:46,960 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:24:46,960 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9. 
2024-11-21T04:24:46,960 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1676): Region close journal for bd674b740b7e4d7818a9c1d139f861b9: Waiting for close lock at 1732163086938Running coprocessor pre-close hooks at 1732163086938Disabling compacts and flushes for region at 1732163086938Disabling writes for close at 1732163086938Writing region close event to WAL at 1732163086941 (+3 ms)Running coprocessor post-close hooks at 1732163086960 (+19 ms)Closed at 1732163086960 2024-11-21T04:24:46,960 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:24:46,961 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:24:46,961 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7. 2024-11-21T04:24:46,961 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1676): Region close journal for 420a7d1da52b47aef21498fe0f4727b7: Waiting for close lock at 1732163086942Running coprocessor pre-close hooks at 1732163086942Disabling compacts and flushes for region at 1732163086942Disabling writes for close at 1732163086942Writing region close event to WAL at 1732163086948 (+6 ms)Running coprocessor post-close hooks at 1732163086961 (+13 ms)Closed at 1732163086961 2024-11-21T04:24:46,963 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(157): Closed bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:46,964 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=bd674b740b7e4d7818a9c1d139f861b9, regionState=CLOSED 2024-11-21T04:24:46,964 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(157): Closed 420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:46,965 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=420a7d1da52b47aef21498fe0f4727b7, regionState=CLOSED 2024-11-21T04:24:46,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure bd674b740b7e4d7818a9c1d139f861b9, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:24:46,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=155, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 420a7d1da52b47aef21498fe0f4727b7, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:24:46,973 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=152 2024-11-21T04:24:46,973 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=152, state=SUCCESS, hasLock=false; 
CloseRegionProcedure bd674b740b7e4d7818a9c1d139f861b9, server=fdc0a393186c,34363,1732162938185 in 188 msec 2024-11-21T04:24:46,973 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=155, resume processing ppid=153 2024-11-21T04:24:46,973 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, ppid=153, state=SUCCESS, hasLock=false; CloseRegionProcedure 420a7d1da52b47aef21498fe0f4727b7, server=fdc0a393186c,42393,1732162938268 in 185 msec 2024-11-21T04:24:46,974 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bd674b740b7e4d7818a9c1d139f861b9, UNASSIGN in 303 msec 2024-11-21T04:24:46,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=151 2024-11-21T04:24:46,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=420a7d1da52b47aef21498fe0f4727b7, UNASSIGN in 303 msec 2024-11-21T04:24:46,978 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-11-21T04:24:46,978 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 307 msec 2024-11-21T04:24:46,979 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163086978"}]},"ts":"1732163086978"} 2024-11-21T04:24:46,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-21T04:24:46,986 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-21T04:24:46,987 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-21T04:24:46,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 327 msec 2024-11-21T04:24:47,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-21T04:24:47,292 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-21T04:24:47,293 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,294 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,294 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,295 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=156, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,297 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,299 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:47,299 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:47,300 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/recovered.edits] 2024-11-21T04:24:47,300 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/recovered.edits] 2024-11-21T04:24:47,303 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/cf/a86eeb2338e040cb9ea5e6620d2af785 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/cf/a86eeb2338e040cb9ea5e6620d2af785 2024-11-21T04:24:47,303 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/cf/16e884df3e7646838c95f2695ac528f7 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/cf/16e884df3e7646838c95f2695ac528f7 2024-11-21T04:24:47,306 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/recovered.edits/9.seqid to 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7/recovered.edits/9.seqid 2024-11-21T04:24:47,306 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9/recovered.edits/9.seqid 2024-11-21T04:24:47,306 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/420a7d1da52b47aef21498fe0f4727b7 2024-11-21T04:24:47,306 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportExpiredSnapshot/bd674b740b7e4d7818a9c1d139f861b9 2024-11-21T04:24:47,306 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-21T04:24:47,308 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=156, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,310 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-21T04:24:47,312 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-21T04:24:47,313 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=156, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,313 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-11-21T04:24:47,314 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163087313"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:47,314 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163087313"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:47,316 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T04:24:47,316 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => bd674b740b7e4d7818a9c1d139f861b9, NAME => 'testtb-testExportExpiredSnapshot,,1732163072915.bd674b740b7e4d7818a9c1d139f861b9.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 420a7d1da52b47aef21498fe0f4727b7, NAME => 'testtb-testExportExpiredSnapshot,1,1732163072915.420a7d1da52b47aef21498fe0f4727b7.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T04:24:47,316 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
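The DisableTableProcedure and DeleteTableProcedure entries in this stretch, together with the "delete name: ..." master requests a few entries below, are the test's cleanup: disable and drop testtb-testExportExpiredSnapshot, then remove its snapshots. The client-side equivalent is a handful of Admin calls (table and snapshot names taken from the log; the connection setup is generic):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotTestCleanupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // A table must be disabled (regions closed, state=DISABLED in hbase:meta)
      // before it can be deleted; both steps run as master procedures.
      admin.disableTable(table);
      admin.deleteTable(table);
      // Drop the snapshots created by the test, matching the
      // "delete name: ..." requests handled by MasterRpcServices in the log.
      admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
      admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
      admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
    }
  }
}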
2024-11-21T04:24:47,316 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732163087316"}]},"ts":"9223372036854775807"} 2024-11-21T04:24:47,318 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-21T04:24:47,318 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=156, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,319 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 25 msec 2024-11-21T04:24:47,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-21T04:24:47,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-21T04:24:47,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-21T04:24:47,346 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-21T04:24:47,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,357 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:47,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:47,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:47,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:47,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-21T04:24:47,358 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,358 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,358 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,358 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-21T04:24:47,358 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,359 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,367 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-21T04:24:47,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-21T04:24:47,370 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: 
DISABLED 2024-11-21T04:24:47,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-21T04:24:47,374 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-21T04:24:47,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-21T04:24:47,393 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=799 (was 805), OpenFileDescriptor=789 (was 804), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=432 (was 547), ProcessCount=14 (was 23), AvailableMemoryMB=1535 (was 718) - AvailableMemoryMB LEAK? - 2024-11-21T04:24:47,393 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-11-21T04:24:47,408 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=799, OpenFileDescriptor=789, MaxFileDescriptor=1048576, SystemLoadAverage=432, ProcessCount=14, AvailableMemoryMB=1534 2024-11-21T04:24:47,408 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-11-21T04:24:47,410 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:24:47,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T04:24:47,411 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:24:47,411 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:47,411 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 157 2024-11-21T04:24:47,412 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:24:47,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-21T04:24:47,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is 
added to blk_1073742166_1342 (size=412) 2024-11-21T04:24:47,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742166_1342 (size=412) 2024-11-21T04:24:47,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742166_1342 (size=412) 2024-11-21T04:24:47,420 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => eeea7b63305943e96f26d7f622f19d8b, NAME => 'testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:47,421 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 76f43002dfe401371e684ca073e36f8c, NAME => 'testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:47,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742167_1343 (size=73) 2024-11-21T04:24:47,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742168_1344 (size=73) 2024-11-21T04:24:47,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742168_1344 (size=73) 2024-11-21T04:24:47,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742167_1343 (size=73) 2024-11-21T04:24:47,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742167_1343 (size=73) 2024-11-21T04:24:47,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742168_1344 (size=73) 2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 
2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 76f43002dfe401371e684ca073e36f8c, disabling compactions & flushes 2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing eeea7b63305943e96f26d7f622f19d8b, disabling compactions & flushes 2024-11-21T04:24:47,427 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:24:47,427 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. after waiting 0 ms 2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. after waiting 0 ms 2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 2024-11-21T04:24:47,427 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:24:47,427 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 
2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 76f43002dfe401371e684ca073e36f8c: Waiting for close lock at 1732163087427Disabling compacts and flushes for region at 1732163087427Disabling writes for close at 1732163087427Writing region close event to WAL at 1732163087427Closed at 1732163087427 2024-11-21T04:24:47,427 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for eeea7b63305943e96f26d7f622f19d8b: Waiting for close lock at 1732163087427Disabling compacts and flushes for region at 1732163087427Disabling writes for close at 1732163087427Writing region close event to WAL at 1732163087427Closed at 1732163087427 2024-11-21T04:24:47,428 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:24:47,429 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732163087428"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163087428"}]},"ts":"1732163087428"} 2024-11-21T04:24:47,429 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732163087428"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163087428"}]},"ts":"1732163087428"} 2024-11-21T04:24:47,431 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
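MetaTableAccessor reports the two new regions written to hbase:meta. A short sketch of how a client could list those regions and their locations once the table is assigned, assuming an existing Connection named conn (the helper class is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    final class RegionListingSketch {
      // Prints the encoded region name and hosting server for each region of the table.
      static void printRegions(Connection conn) throws IOException {
        TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }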
2024-11-21T04:24:47,432 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:24:47,432 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163087432"}]},"ts":"1732163087432"} 2024-11-21T04:24:47,433 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-21T04:24:47,434 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:24:47,434 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:24:47,434 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:24:47,434 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:24:47,434 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:24:47,434 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:24:47,434 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:24:47,434 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:24:47,434 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:24:47,434 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:24:47,435 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:24:47,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=eeea7b63305943e96f26d7f622f19d8b, ASSIGN}, {pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=76f43002dfe401371e684ca073e36f8c, ASSIGN}] 2024-11-21T04:24:47,436 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=76f43002dfe401371e684ca073e36f8c, ASSIGN 2024-11-21T04:24:47,436 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=eeea7b63305943e96f26d7f622f19d8b, ASSIGN 2024-11-21T04:24:47,436 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=76f43002dfe401371e684ca073e36f8c, ASSIGN; state=OFFLINE, location=fdc0a393186c,39347,1732162938035; forceNewPlan=false, retain=false 2024-11-21T04:24:47,436 INFO [PEWorker-2 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=eeea7b63305943e96f26d7f622f19d8b, ASSIGN; state=OFFLINE, location=fdc0a393186c,42393,1732162938268; forceNewPlan=false, retain=false 2024-11-21T04:24:47,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-21T04:24:47,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-21T04:24:47,587 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-21T04:24:47,588 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=eeea7b63305943e96f26d7f622f19d8b, regionState=OPENING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:47,588 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=76f43002dfe401371e684ca073e36f8c, regionState=OPENING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:24:47,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=eeea7b63305943e96f26d7f622f19d8b, ASSIGN because future has completed 2024-11-21T04:24:47,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure eeea7b63305943e96f26d7f622f19d8b, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:24:47,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=76f43002dfe401371e684ca073e36f8c, ASSIGN because future has completed 2024-11-21T04:24:47,596 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=161, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure 76f43002dfe401371e684ca073e36f8c, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:24:47,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-21T04:24:47,750 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 
2024-11-21T04:24:47,750 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7752): Opening region: {ENCODED => eeea7b63305943e96f26d7f622f19d8b, NAME => 'testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T04:24:47,750 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. service=AccessControlService 2024-11-21T04:24:47,750 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:24:47,751 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:47,751 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:47,751 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7794): checking encryption for eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:47,751 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7797): checking classloading for eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:47,752 INFO [StoreOpener-eeea7b63305943e96f26d7f622f19d8b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:47,753 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:24:47,753 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7752): Opening region: {ENCODED => 76f43002dfe401371e684ca073e36f8c, NAME => 'testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T04:24:47,753 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. service=AccessControlService 2024-11-21T04:24:47,754 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
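The open handlers above register the AccessControlService coprocessor on each region, which is what later produces the PermissionStorage and ZKPermissionWatcher entries. A rough sketch of the configuration that enables that coprocessor, using the standard property keys (the exact values on this test cluster are an assumption, not read from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class AccessControlConfigSketch {
      static Configuration withAccessController() {
        Configuration conf = HBaseConfiguration.create();
        // Turn on authorization and load AccessController on master, regions and regionservers.
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        return conf;
      }
    }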
2024-11-21T04:24:47,754 INFO [StoreOpener-eeea7b63305943e96f26d7f622f19d8b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eeea7b63305943e96f26d7f622f19d8b columnFamilyName cf 2024-11-21T04:24:47,754 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:47,754 DEBUG [StoreOpener-eeea7b63305943e96f26d7f622f19d8b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:47,754 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:24:47,754 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7794): checking encryption for 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:47,754 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7797): checking classloading for 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:47,754 INFO [StoreOpener-eeea7b63305943e96f26d7f622f19d8b-1 {}] regionserver.HStore(327): Store=eeea7b63305943e96f26d7f622f19d8b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:24:47,755 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1038): replaying wal for eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:47,756 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:47,756 INFO [StoreOpener-76f43002dfe401371e684ca073e36f8c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:47,756 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:47,756 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1048): stopping wal replay for eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:47,756 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1060): Cleaning up temporary data for eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:47,757 INFO [StoreOpener-76f43002dfe401371e684ca073e36f8c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76f43002dfe401371e684ca073e36f8c columnFamilyName cf 2024-11-21T04:24:47,757 DEBUG [StoreOpener-76f43002dfe401371e684ca073e36f8c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:24:47,758 INFO [StoreOpener-76f43002dfe401371e684ca073e36f8c-1 {}] regionserver.HStore(327): Store=76f43002dfe401371e684ca073e36f8c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:24:47,758 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1038): replaying wal for 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:47,758 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1093): writing seq id for eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:47,759 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:47,759 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:47,759 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1048): stopping wal replay for 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:47,760 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1060): Cleaning up temporary data for 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:47,760 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:24:47,761 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1114): Opened eeea7b63305943e96f26d7f622f19d8b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59789330, jitterRate=-0.10906955599784851}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:24:47,761 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1122): Running coprocessor post-open hooks for eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:47,761 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1093): writing seq id for 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:47,761 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1006): Region open journal for eeea7b63305943e96f26d7f622f19d8b: Running coprocessor pre-open hook at 1732163087751Writing region info on filesystem at 1732163087751Initializing all the Stores at 1732163087752 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163087752Cleaning up temporary data from old regions at 1732163087756 (+4 ms)Running coprocessor post-open hooks at 1732163087761 (+5 ms)Region opened successfully at 1732163087761 2024-11-21T04:24:47,762 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b., pid=160, masterSystemTime=1732163087747 2024-11-21T04:24:47,763 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:24:47,764 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1114): Opened 76f43002dfe401371e684ca073e36f8c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71650129, jitterRate=0.06767012178897858}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:24:47,764 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:47,764 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1006): Region open journal for 76f43002dfe401371e684ca073e36f8c: Running coprocessor pre-open hook at 1732163087754Writing region info 
on filesystem at 1732163087754Initializing all the Stores at 1732163087755 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163087755Cleaning up temporary data from old regions at 1732163087760 (+5 ms)Running coprocessor post-open hooks at 1732163087764 (+4 ms)Region opened successfully at 1732163087764 2024-11-21T04:24:47,765 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 2024-11-21T04:24:47,765 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 2024-11-21T04:24:47,765 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c., pid=161, masterSystemTime=1732163087750 2024-11-21T04:24:47,765 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=eeea7b63305943e96f26d7f622f19d8b, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:47,767 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:24:47,767 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 
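The CompactionConfiguration lines printed for each store above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, major period 604800000 ms) reflect the usual hbase.hstore.compaction.* and hbase.hregion.majorcompaction settings; the sketch below reads them back from a Configuration with defaults matching the logged values (the key names are standard, the wrapper class is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class CompactionSettingsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);    // minFilesToCompact
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);   // maxFilesToCompact
        double ratio = conf.getDouble("hbase.hstore.compaction.ratio", 1.2);
        long majorPeriodMs = conf.getLong("hbase.hregion.majorcompaction", 604800000L); // 7 days
        System.out.printf("min=%d max=%d ratio=%.1f majorPeriodMs=%d%n",
            minFiles, maxFiles, ratio, majorPeriodMs);
      }
    }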
2024-11-21T04:24:47,767 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=76f43002dfe401371e684ca073e36f8c, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:24:47,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure eeea7b63305943e96f26d7f622f19d8b, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:24:47,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=161, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure 76f43002dfe401371e684ca073e36f8c, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:24:47,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=158 2024-11-21T04:24:47,770 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=158, state=SUCCESS, hasLock=false; OpenRegionProcedure eeea7b63305943e96f26d7f622f19d8b, server=fdc0a393186c,42393,1732162938268 in 175 msec 2024-11-21T04:24:47,770 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=161, resume processing ppid=159 2024-11-21T04:24:47,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=eeea7b63305943e96f26d7f622f19d8b, ASSIGN in 335 msec 2024-11-21T04:24:47,770 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, ppid=159, state=SUCCESS, hasLock=false; OpenRegionProcedure 76f43002dfe401371e684ca073e36f8c, server=fdc0a393186c,39347,1732162938035 in 173 msec 2024-11-21T04:24:47,772 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=157 2024-11-21T04:24:47,772 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=76f43002dfe401371e684ca073e36f8c, ASSIGN in 335 msec 2024-11-21T04:24:47,772 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:24:47,772 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163087772"}]},"ts":"1732163087772"} 2024-11-21T04:24:47,774 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-21T04:24:47,775 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:24:47,775 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-21T04:24:47,777 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T04:24:47,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:47,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:47,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:47,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:24:47,834 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,834 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,834 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,834 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,834 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,834 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,834 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,835 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:24:47,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 423 msec 2024-11-21T04:24:48,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-21T04:24:48,042 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-21T04:24:48,042 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-11-21T04:24:48,042 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:48,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-11-21T04:24:48,046 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:48,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-11-21T04:24:48,047 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T04:24:48,049 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T04:24:48,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163088049 (current time:1732163088049). 
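The test harness above waits for all regions to be assigned and then issues a FLUSH-type snapshot request named emptySnaptb0-testEmptyExportFileSystemState. A minimal sketch of the same sequence through the public Admin API, assuming an open Admin handle (the polling interval is an arbitrary choice for illustration):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    final class SnapshotSketch {
      static void snapshotWhenReady(Admin admin) throws Exception {
        TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        // Wait until every region of the table is assigned and open.
        while (!admin.isTableAvailable(tn)) {
          Thread.sleep(100);
        }
        // Flush snapshot of an enabled table, as requested in the log.
        admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState", tn);
      }
    }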
2024-11-21T04:24:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:24:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-21T04:24:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:24:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c5459bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:48,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:48,051 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:48,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:48,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:48,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e9c29e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:48,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:48,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,053 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58980, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:48,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@718ec757, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:48,054 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:48,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:48,056 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49274, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:48,057 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:24:48,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:48,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,057 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
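The call stacks and PermissionStorage entries in this stretch come from the snapshot validation path reading the table's ACL (jenkins: RWXCA) so it can be recorded in the snapshot description. From the client side, the equivalent lookup can be done with AccessControlClient; a sketch assuming an existing Connection conn (note that AccessControlClient methods declare throws Throwable):

    import java.util.List;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    final class AclLookupSketch {
      // Lists the permissions stored in hbase:acl for the test table.
      static void printAcls(Connection conn) throws Throwable {
        List<UserPermission> perms =
            AccessControlClient.getUserPermissions(conn, "testtb-testEmptyExportFileSystemState");
        for (UserPermission p : perms) {
          System.out.println(p);
        }
      }
    }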
2024-11-21T04:24:48,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24d376c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:48,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:48,059 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:48,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:48,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:48,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3044030c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:48,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:48,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,060 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58996, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:48,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d1cd367, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:48,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:48,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:48,062 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49280, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:48,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:48,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:48,065 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53134, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:48,066 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:24:48,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:48,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,066 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:24:48,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T04:24:48,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:24:48,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T04:24:48,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-21T04:24:48,069 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:24:48,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-21T04:24:48,069 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:24:48,071 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:24:48,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742169_1345 (size=185) 2024-11-21T04:24:48,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742169_1345 (size=185) 2024-11-21T04:24:48,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742169_1345 (size=185) 2024-11-21T04:24:48,079 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:24:48,079 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eeea7b63305943e96f26d7f622f19d8b}, {pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 76f43002dfe401371e684ca073e36f8c}] 2024-11-21T04:24:48,080 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:48,080 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:48,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-21T04:24:48,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=163 2024-11-21T04:24:48,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=164 2024-11-21T04:24:48,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 2024-11-21T04:24:48,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:24:48,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.HRegion(2603): Flush status journal for 76f43002dfe401371e684ca073e36f8c: 2024-11-21T04:24:48,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.HRegion(2603): Flush status journal for eeea7b63305943e96f26d7f622f19d8b: 2024-11-21T04:24:48,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-21T04:24:48,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-21T04:24:48,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:48,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:48,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:24:48,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:24:48,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742170_1346 (size=76) 2024-11-21T04:24:48,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742171_1347 (size=76) 2024-11-21T04:24:48,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742170_1346 (size=76) 2024-11-21T04:24:48,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742170_1346 (size=76) 2024-11-21T04:24:48,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742171_1347 (size=76) 2024-11-21T04:24:48,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742171_1347 (size=76) 2024-11-21T04:24:48,247 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:24:48,247 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-21T04:24:48,247 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 
2024-11-21T04:24:48,247 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=163 2024-11-21T04:24:48,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=163 2024-11-21T04:24:48,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=164 2024-11-21T04:24:48,248 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:48,248 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:48,248 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:48,248 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:48,250 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure eeea7b63305943e96f26d7f622f19d8b in 170 msec 2024-11-21T04:24:48,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=164, resume processing ppid=162 2024-11-21T04:24:48,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 76f43002dfe401371e684ca073e36f8c in 170 msec 2024-11-21T04:24:48,251 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:24:48,252 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:24:48,253 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:24:48,253 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,253 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,262 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742172_1348 (size=567) 2024-11-21T04:24:48,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742172_1348 (size=567) 2024-11-21T04:24:48,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742172_1348 (size=567) 2024-11-21T04:24:48,266 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:24:48,271 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:24:48,272 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,275 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:24:48,275 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-21T04:24:48,276 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 208 msec 2024-11-21T04:24:48,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-21T04:24:48,383 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-21T04:24:48,390 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='0876cb71ffef79b9b61e23c183ff0e097', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:48,392 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', 
row='1b5735edce22d137efb37358698c9835b', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c., hostname=fdc0a393186c,39347,1732162938035, seqNum=2] 2024-11-21T04:24:48,394 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='2dea8d07e6ed49426dab30c45937c7a21', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c., hostname=fdc0a393186c,39347,1732162938035, seqNum=2] 2024-11-21T04:24:48,396 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='3083cf9418b2adb2f58c7bbcb6737612e', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c., hostname=fdc0a393186c,39347,1732162938035, seqNum=2] 2024-11-21T04:24:48,403 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42393 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:24:48,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39347 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:24:48,406 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T04:24:48,408 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-21T04:24:48,408 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 2024-11-21T04:24:48,408 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:24:48,410 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T04:24:48,414 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T04:24:48,420 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-21T04:24:48,422 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T04:24:48,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163088422 (current time:1732163088422). 
2024-11-21T04:24:48,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:24:48,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-21T04:24:48,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:24:48,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38ac8bcf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:48,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:48,424 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:48,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:48,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:48,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fe73855, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:48,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:48,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,425 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59022, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:48,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@213491e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:48,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:48,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:48,428 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49292, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:48,429 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:24:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,429 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T04:24:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66a6ec83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:24:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:24:48,430 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:24:48,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:24:48,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:24:48,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b1e581e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:24:48,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:24:48,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,431 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59050, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:24:48,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d89df9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:24:48,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:24:48,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:24:48,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:48,434 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49294, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T04:24:48,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:24:48,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:24:48,436 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53148, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:24:48,437 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:24:48,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:24:48,437 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:24:48,437 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:24:48,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-21T04:24:48,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:24:48,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-21T04:24:48,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-21T04:24:48,440 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:24:48,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-21T04:24:48,441 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:24:48,443 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:24:48,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742173_1349 (size=180) 2024-11-21T04:24:48,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742173_1349 (size=180) 2024-11-21T04:24:48,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742173_1349 (size=180) 2024-11-21T04:24:48,449 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:24:48,449 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eeea7b63305943e96f26d7f622f19d8b}, {pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 76f43002dfe401371e684ca073e36f8c}] 2024-11-21T04:24:48,450 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:48,450 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:48,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-21T04:24:48,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=166 2024-11-21T04:24:48,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=167 2024-11-21T04:24:48,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 2024-11-21T04:24:48,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 
2024-11-21T04:24:48,602 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2902): Flushing eeea7b63305943e96f26d7f622f19d8b 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-21T04:24:48,602 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2902): Flushing 76f43002dfe401371e684ca073e36f8c 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-21T04:24:48,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/.tmp/cf/f82988e1d6604d8f84d04695d5d1f5dc is 69, key is 0876cb71ffef79b9b61e23c183ff0e097/cf:q/1732163088403/Put/seqid=0 2024-11-21T04:24:48,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/.tmp/cf/353781b077f74376831aa344d2cf6bce is 71, key is 12cfa93fce8112f710fdd4007b68b9fc/cf:q/1732163088404/Put/seqid=0 2024-11-21T04:24:48,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742174_1350 (size=5149) 2024-11-21T04:24:48,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742174_1350 (size=5149) 2024-11-21T04:24:48,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742174_1350 (size=5149) 2024-11-21T04:24:48,641 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/.tmp/cf/f82988e1d6604d8f84d04695d5d1f5dc 2024-11-21T04:24:48,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/.tmp/cf/f82988e1d6604d8f84d04695d5d1f5dc as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/cf/f82988e1d6604d8f84d04695d5d1f5dc 2024-11-21T04:24:48,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742175_1351 (size=8460) 2024-11-21T04:24:48,653 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/cf/f82988e1d6604d8f84d04695d5d1f5dc, 
entries=1, sequenceid=6, filesize=5.0 K 2024-11-21T04:24:48,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742175_1351 (size=8460) 2024-11-21T04:24:48,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742175_1351 (size=8460) 2024-11-21T04:24:48,654 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for eeea7b63305943e96f26d7f622f19d8b in 52ms, sequenceid=6, compaction requested=false 2024-11-21T04:24:48,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-21T04:24:48,655 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/.tmp/cf/353781b077f74376831aa344d2cf6bce 2024-11-21T04:24:48,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2603): Flush status journal for eeea7b63305943e96f26d7f622f19d8b: 2024-11-21T04:24:48,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-21T04:24:48,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:48,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/cf/f82988e1d6604d8f84d04695d5d1f5dc] hfiles 2024-11-21T04:24:48,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/cf/f82988e1d6604d8f84d04695d5d1f5dc for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/.tmp/cf/353781b077f74376831aa344d2cf6bce as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/cf/353781b077f74376831aa344d2cf6bce 2024-11-21T04:24:48,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742176_1352 (size=115) 2024-11-21T04:24:48,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742176_1352 (size=115) 2024-11-21T04:24:48,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742176_1352 (size=115) 2024-11-21T04:24:48,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 
2024-11-21T04:24:48,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-21T04:24:48,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=166 2024-11-21T04:24:48,665 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:48,665 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:24:48,666 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/cf/353781b077f74376831aa344d2cf6bce, entries=49, sequenceid=6, filesize=8.3 K 2024-11-21T04:24:48,667 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 76f43002dfe401371e684ca073e36f8c in 65ms, sequenceid=6, compaction requested=false 2024-11-21T04:24:48,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2603): Flush status journal for 76f43002dfe401371e684ca073e36f8c: 2024-11-21T04:24:48,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-21T04:24:48,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:24:48,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/cf/353781b077f74376831aa344d2cf6bce] hfiles 2024-11-21T04:24:48,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/cf/353781b077f74376831aa344d2cf6bce for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,667 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure eeea7b63305943e96f26d7f622f19d8b in 217 msec 2024-11-21T04:24:48,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742177_1353 (size=115) 2024-11-21T04:24:48,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742177_1353 (size=115) 2024-11-21T04:24:48,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742177_1353 (size=115) 2024-11-21T04:24:48,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 
2024-11-21T04:24:48,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=167 2024-11-21T04:24:48,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=167 2024-11-21T04:24:48,675 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:48,675 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:24:48,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=165 2024-11-21T04:24:48,678 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:24:48,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 76f43002dfe401371e684ca073e36f8c in 227 msec 2024-11-21T04:24:48,679 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:24:48,679 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:24:48,679 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,680 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742178_1354 (size=645) 2024-11-21T04:24:48,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742178_1354 (size=645) 2024-11-21T04:24:48,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742178_1354 (size=645) 2024-11-21T04:24:48,690 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:24:48,695 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:24:48,696 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,697 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:24:48,697 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-21T04:24:48,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 259 msec 2024-11-21T04:24:48,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-21T04:24:48,752 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-21T04:24:48,752 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163088752 2024-11-21T04:24:48,753 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44067, tgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163088752, rawTgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163088752, srcFsUri=hdfs://localhost:44067, srcDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:48,782 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44067, inputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:24:48,782 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163088752, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163088752/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,784 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T04:24:48,788 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163088752/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:48,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742180_1356 (size=567) 2024-11-21T04:24:48,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742180_1356 (size=567) 2024-11-21T04:24:48,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742180_1356 (size=567) 2024-11-21T04:24:48,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742179_1355 (size=185) 2024-11-21T04:24:48,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742179_1355 (size=185) 2024-11-21T04:24:48,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742179_1355 (size=185) 2024-11-21T04:24:48,813 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:48,813 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:48,813 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:49,718 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-1525313963387112609.jar 2024-11-21T04:24:49,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:49,719 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:49,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-8122028996046916649.jar 2024-11-21T04:24:49,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:49,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:49,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:49,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:49,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:49,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:24:49,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T04:24:49,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T04:24:49,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T04:24:49,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T04:24:49,782 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T04:24:49,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T04:24:49,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T04:24:49,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T04:24:49,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T04:24:49,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T04:24:49,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T04:24:49,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:24:49,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:24:49,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:24:49,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:24:49,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:24:49,784 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:24:49,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:24:49,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742181_1357 (size=131440) 2024-11-21T04:24:49,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742181_1357 (size=131440) 2024-11-21T04:24:49,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742181_1357 (size=131440) 2024-11-21T04:24:49,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742182_1358 (size=4188619) 2024-11-21T04:24:49,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742182_1358 (size=4188619) 2024-11-21T04:24:49,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742182_1358 (size=4188619) 2024-11-21T04:24:49,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742183_1359 (size=1323991) 2024-11-21T04:24:49,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742183_1359 (size=1323991) 2024-11-21T04:24:49,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742183_1359 (size=1323991) 2024-11-21T04:24:49,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742184_1360 (size=903734) 2024-11-21T04:24:49,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742184_1360 (size=903734) 2024-11-21T04:24:49,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742184_1360 (size=903734) 2024-11-21T04:24:49,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742185_1361 (size=8360083) 2024-11-21T04:24:49,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742185_1361 (size=8360083) 2024-11-21T04:24:49,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742185_1361 (size=8360083) 2024-11-21T04:24:49,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742186_1362 (size=1877034) 2024-11-21T04:24:49,955 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742186_1362 (size=1877034) 2024-11-21T04:24:49,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742186_1362 (size=1877034) 2024-11-21T04:24:49,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742187_1363 (size=77835) 2024-11-21T04:24:49,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742187_1363 (size=77835) 2024-11-21T04:24:49,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742187_1363 (size=77835) 2024-11-21T04:24:49,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742188_1364 (size=30949) 2024-11-21T04:24:49,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742188_1364 (size=30949) 2024-11-21T04:24:49,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742188_1364 (size=30949) 2024-11-21T04:24:49,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742189_1365 (size=1597327) 2024-11-21T04:24:49,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742189_1365 (size=1597327) 2024-11-21T04:24:49,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742189_1365 (size=1597327) 2024-11-21T04:24:50,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742190_1366 (size=4695811) 2024-11-21T04:24:50,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742190_1366 (size=4695811) 2024-11-21T04:24:50,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742190_1366 (size=4695811) 2024-11-21T04:24:50,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742191_1367 (size=232957) 2024-11-21T04:24:50,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742191_1367 (size=232957) 2024-11-21T04:24:50,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742191_1367 (size=232957) 2024-11-21T04:24:50,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742192_1368 (size=127628) 2024-11-21T04:24:50,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742192_1368 (size=127628) 2024-11-21T04:24:50,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742192_1368 (size=127628) 2024-11-21T04:24:50,059 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742193_1369 (size=20406) 2024-11-21T04:24:50,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742193_1369 (size=20406) 2024-11-21T04:24:50,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742193_1369 (size=20406) 2024-11-21T04:24:50,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742194_1370 (size=5175431) 2024-11-21T04:24:50,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742194_1370 (size=5175431) 2024-11-21T04:24:50,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742194_1370 (size=5175431) 2024-11-21T04:24:50,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742195_1371 (size=217634) 2024-11-21T04:24:50,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742195_1371 (size=217634) 2024-11-21T04:24:50,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742195_1371 (size=217634) 2024-11-21T04:24:50,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742196_1372 (size=1832290) 2024-11-21T04:24:50,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742196_1372 (size=1832290) 2024-11-21T04:24:50,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742196_1372 (size=1832290) 2024-11-21T04:24:50,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742197_1373 (size=322274) 2024-11-21T04:24:50,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742197_1373 (size=322274) 2024-11-21T04:24:50,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742197_1373 (size=322274) 2024-11-21T04:24:50,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742198_1374 (size=503880) 2024-11-21T04:24:50,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742198_1374 (size=503880) 2024-11-21T04:24:50,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742198_1374 (size=503880) 2024-11-21T04:24:50,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742199_1375 (size=6424740) 2024-11-21T04:24:50,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742199_1375 (size=6424740) 2024-11-21T04:24:50,223 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742199_1375 (size=6424740) 2024-11-21T04:24:50,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742200_1376 (size=29229) 2024-11-21T04:24:50,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742200_1376 (size=29229) 2024-11-21T04:24:50,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742200_1376 (size=29229) 2024-11-21T04:24:50,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742201_1377 (size=24096) 2024-11-21T04:24:50,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742201_1377 (size=24096) 2024-11-21T04:24:50,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742201_1377 (size=24096) 2024-11-21T04:24:50,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742202_1378 (size=111872) 2024-11-21T04:24:50,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742202_1378 (size=111872) 2024-11-21T04:24:50,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742202_1378 (size=111872) 2024-11-21T04:24:50,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742203_1379 (size=440656) 2024-11-21T04:24:50,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742203_1379 (size=440656) 2024-11-21T04:24:50,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742203_1379 (size=440656) 2024-11-21T04:24:50,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742204_1380 (size=45609) 2024-11-21T04:24:50,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742204_1380 (size=45609) 2024-11-21T04:24:50,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742204_1380 (size=45609) 2024-11-21T04:24:50,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742205_1381 (size=136454) 2024-11-21T04:24:50,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742205_1381 (size=136454) 2024-11-21T04:24:50,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742205_1381 (size=136454) 2024-11-21T04:24:50,701 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
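For context, the "For class X, using jar Y" DEBUG lines above come from TableMapReduceUtil resolving the jar that provides each class the export job needs and adding it to the job's tmpjars, and the JobResourceUploader warning only notes that no explicit job jar was set for this in-process submission. A minimal sketch of that kind of job setup follows; the class and job names are illustrative, not taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSetupSketch {
  public static Job newJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-sketch");
    // Setting a job jar avoids the "No job jar file set" warning seen above.
    job.setJarByClass(DependencyJarSetupSketch.class);
    // Locates the jar containing each required class (HBase modules, ZooKeeper,
    // shaded protobuf, OpenTelemetry, ...) and adds it to tmpjars -- the source
    // of the "For class ..., using jar ..." DEBUG lines.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}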
2024-11-21T04:24:50,704 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-21T04:24:50,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742206_1382 (size=7) 2024-11-21T04:24:50,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742206_1382 (size=7) 2024-11-21T04:24:50,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742206_1382 (size=7) 2024-11-21T04:24:50,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742207_1383 (size=10) 2024-11-21T04:24:50,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742207_1383 (size=10) 2024-11-21T04:24:50,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742207_1383 (size=10) 2024-11-21T04:24:50,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742208_1384 (size=303635) 2024-11-21T04:24:50,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742208_1384 (size=303635) 2024-11-21T04:24:50,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742208_1384 (size=303635) 2024-11-21T04:24:50,756 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T04:24:50,756 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T04:24:51,128 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0007_000001 (auth:SIMPLE) from 127.0.0.1:43028 2024-11-21T04:24:52,462 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:24:57,553 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0007_000001 (auth:SIMPLE) from 127.0.0.1:35650 2024-11-21T04:24:57,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-21T04:24:57,560 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-21T04:24:58,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742209_1385 (size=349261) 2024-11-21T04:24:58,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742209_1385 (size=349261) 2024-11-21T04:24:58,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742209_1385 (size=349261) 2024-11-21T04:24:58,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742210_1386 (size=8568) 2024-11-21T04:24:58,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742210_1386 (size=8568) 2024-11-21T04:24:58,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742210_1386 (size=8568) 2024-11-21T04:24:58,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742211_1387 (size=460) 2024-11-21T04:24:58,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742211_1387 (size=460) 2024-11-21T04:24:58,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742211_1387 (size=460) 2024-11-21T04:24:58,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742212_1388 (size=8568) 2024-11-21T04:24:58,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742212_1388 (size=8568) 2024-11-21T04:24:58,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742212_1388 (size=8568) 2024-11-21T04:24:58,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742213_1389 (size=349261) 2024-11-21T04:24:58,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742213_1389 
(size=349261) 2024-11-21T04:24:58,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742213_1389 (size=349261) 2024-11-21T04:24:59,950 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T04:24:59,951 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-21T04:24:59,957 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:59,957 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T04:24:59,958 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T04:24:59,958 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:59,958 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-21T04:24:59,958 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-21T04:24:59,958 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163088752/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163088752/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T04:24:59,959 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163088752/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-21T04:24:59,959 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163088752/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-21T04:24:59,972 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-11-21T04:24:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T04:24:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-21T04:24:59,977 DEBUG 
[PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163099977"}]},"ts":"1732163099977"} 2024-11-21T04:24:59,980 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-21T04:24:59,980 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-21T04:24:59,981 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-21T04:24:59,983 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=eeea7b63305943e96f26d7f622f19d8b, UNASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=76f43002dfe401371e684ca073e36f8c, UNASSIGN}] 2024-11-21T04:24:59,984 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=76f43002dfe401371e684ca073e36f8c, UNASSIGN 2024-11-21T04:24:59,985 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=eeea7b63305943e96f26d7f622f19d8b, UNASSIGN 2024-11-21T04:24:59,985 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=76f43002dfe401371e684ca073e36f8c, regionState=CLOSING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:24:59,986 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=eeea7b63305943e96f26d7f622f19d8b, regionState=CLOSING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:24:59,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=76f43002dfe401371e684ca073e36f8c, UNASSIGN because future has completed 2024-11-21T04:24:59,988 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:24:59,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure 76f43002dfe401371e684ca073e36f8c, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:24:59,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=eeea7b63305943e96f26d7f622f19d8b, UNASSIGN because future has completed 2024-11-21T04:24:59,990 DEBUG 
[PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:24:59,990 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure eeea7b63305943e96f26d7f622f19d8b, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:25:00,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-21T04:25:00,141 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(122): Close 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:25:00,141 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:25:00,141 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1722): Closing 76f43002dfe401371e684ca073e36f8c, disabling compactions & flushes 2024-11-21T04:25:00,141 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:25:00,141 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:25:00,141 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. after waiting 0 ms 2024-11-21T04:25:00,142 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:25:00,143 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(122): Close eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:25:00,143 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:25:00,143 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1722): Closing eeea7b63305943e96f26d7f622f19d8b, disabling compactions & flushes 2024-11-21T04:25:00,143 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 2024-11-21T04:25:00,143 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 
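The DisableTableProcedure and the region closes traced here (pids 168-173) are the server side of a single client call, the "disable testtb-testEmptyExportFileSystemState" request logged earlier. A hedged sketch of that call; the connection setup is generic, not this test's minicluster wiring.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        // The master stores a DisableTableProcedure, marks the table DISABLING,
        // unassigns each region, then marks it DISABLED -- the steps logged above.
        admin.disableTable(table);
      }
    }
  }
}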
2024-11-21T04:25:00,143 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. after waiting 0 ms 2024-11-21T04:25:00,143 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 2024-11-21T04:25:00,150 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:25:00,151 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:25:00,151 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c. 2024-11-21T04:25:00,151 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1676): Region close journal for 76f43002dfe401371e684ca073e36f8c: Waiting for close lock at 1732163100141Running coprocessor pre-close hooks at 1732163100141Disabling compacts and flushes for region at 1732163100141Disabling writes for close at 1732163100141Writing region close event to WAL at 1732163100142 (+1 ms)Running coprocessor post-close hooks at 1732163100151 (+9 ms)Closed at 1732163100151 2024-11-21T04:25:00,152 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:25:00,153 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:25:00,153 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b. 
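For reference, the export whose completion is logged earlier in this section (ExportSnapshot "Finalize the Snapshot Export" / "Export Completed") is normally driven through the ExportSnapshot tool. A hedged sketch follows, assuming the tool's documented --snapshot/--copy-to options; the destination path is a placeholder, not this run's export directory.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Roughly equivalent CLI form (per the HBase reference guide):
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot emptySnaptb0-testEmptyExportFileSystemState -copy-to hdfs://<dest>/export
    int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(), new String[] {
      "--snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
      "--copy-to", "hdfs://<destination-cluster>/hbase-exports"  // placeholder target path
    });
    System.exit(rc);
  }
}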
2024-11-21T04:25:00,153 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1676): Region close journal for eeea7b63305943e96f26d7f622f19d8b: Waiting for close lock at 1732163100143Running coprocessor pre-close hooks at 1732163100143Disabling compacts and flushes for region at 1732163100143Disabling writes for close at 1732163100143Writing region close event to WAL at 1732163100147 (+4 ms)Running coprocessor post-close hooks at 1732163100153 (+6 ms)Closed at 1732163100153 2024-11-21T04:25:00,155 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(157): Closed 76f43002dfe401371e684ca073e36f8c 2024-11-21T04:25:00,156 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=76f43002dfe401371e684ca073e36f8c, regionState=CLOSED 2024-11-21T04:25:00,156 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(157): Closed eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:25:00,157 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=eeea7b63305943e96f26d7f622f19d8b, regionState=CLOSED 2024-11-21T04:25:00,159 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure 76f43002dfe401371e684ca073e36f8c, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:25:00,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure eeea7b63305943e96f26d7f622f19d8b, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:25:00,163 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-11-21T04:25:00,163 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; CloseRegionProcedure 76f43002dfe401371e684ca073e36f8c, server=fdc0a393186c,39347,1732162938035 in 172 msec 2024-11-21T04:25:00,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-11-21T04:25:00,165 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=76f43002dfe401371e684ca073e36f8c, UNASSIGN in 180 msec 2024-11-21T04:25:00,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; CloseRegionProcedure eeea7b63305943e96f26d7f622f19d8b, server=fdc0a393186c,42393,1732162938268 in 172 msec 2024-11-21T04:25:00,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-11-21T04:25:00,166 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=eeea7b63305943e96f26d7f622f19d8b, UNASSIGN in 182 msec 2024-11-21T04:25:00,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=169, resume processing ppid=168 2024-11-21T04:25:00,169 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=169, ppid=168, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 186 msec 2024-11-21T04:25:00,173 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163100173"}]},"ts":"1732163100173"} 2024-11-21T04:25:00,175 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-21T04:25:00,175 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-21T04:25:00,177 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 204 msec 2024-11-21T04:25:00,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-21T04:25:00,292 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-21T04:25:00,293 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,295 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,296 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=174, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,300 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c 2024-11-21T04:25:00,300 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:25:00,302 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/cf, FileablePath, 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/recovered.edits] 2024-11-21T04:25:00,302 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/recovered.edits] 2024-11-21T04:25:00,305 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/cf/f82988e1d6604d8f84d04695d5d1f5dc to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/cf/f82988e1d6604d8f84d04695d5d1f5dc 2024-11-21T04:25:00,306 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/cf/353781b077f74376831aa344d2cf6bce to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/cf/353781b077f74376831aa344d2cf6bce 2024-11-21T04:25:00,310 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b/recovered.edits/9.seqid 2024-11-21T04:25:00,310 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c/recovered.edits/9.seqid 2024-11-21T04:25:00,310 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/eeea7b63305943e96f26d7f622f19d8b 2024-11-21T04:25:00,310 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testEmptyExportFileSystemState/76f43002dfe401371e684ca073e36f8c 2024-11-21T04:25:00,310 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-21T04:25:00,320 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=174, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure 
table=testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,324 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-21T04:25:00,327 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-11-21T04:25:00,329 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=174, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,329 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-11-21T04:25:00,329 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163100329"}]},"ts":"9223372036854775807"} 2024-11-21T04:25:00,329 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163100329"}]},"ts":"9223372036854775807"} 2024-11-21T04:25:00,333 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T04:25:00,333 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => eeea7b63305943e96f26d7f622f19d8b, NAME => 'testtb-testEmptyExportFileSystemState,,1732163087409.eeea7b63305943e96f26d7f622f19d8b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 76f43002dfe401371e684ca073e36f8c, NAME => 'testtb-testEmptyExportFileSystemState,1,1732163087409.76f43002dfe401371e684ca073e36f8c.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T04:25:00,333 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
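The DeleteTableProcedure and HFileArchiver activity above, together with the "delete name: ... type: DISABLED" snapshot deletions logged a little further on, correspond to the cleanup calls a client makes once the export has been verified. A minimal sketch of that cleanup; the connection setup is generic.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TestCleanupSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Drops the (already disabled) table: DeleteTableProcedure archives the
      // region directories and removes the rows from hbase:meta, as logged above.
      admin.deleteTable(table);
      // Removes the snapshots taken during the test, matching the master's
      // "delete name: ... type: DISABLED" log lines that follow.
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}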
2024-11-21T04:25:00,333 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732163100333"}]},"ts":"9223372036854775807"} 2024-11-21T04:25:00,336 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-21T04:25:00,337 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=174, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,338 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 44 msec 2024-11-21T04:25:00,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-21T04:25:00,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-21T04:25:00,373 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-21T04:25:00,373 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-21T04:25:00,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:00,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:00,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:00,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:00,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-21T04:25:00,382 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:00,383 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:00,383 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-21T04:25:00,383 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-21T04:25:00,384 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:00,384 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:00,397 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-21T04:25:00,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-21T04:25:00,406 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-21T04:25:00,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-21T04:25:00,454 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=812 (was 799) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:55790 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:53678 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 131716) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43293 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-996390987_1 at /127.0.0.1:53668 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-996390987_1 at /127.0.0.1:48688 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:48716 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5185 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (625843813) connection to localhost/127.0.0.1:43293 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (625843813) connection to localhost/127.0.0.1:42623 from appattempt_1732162945431_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=819 (was 789) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=453 (was 432) - SystemLoadAverage LEAK? -, ProcessCount=18 (was 14) - ProcessCount LEAK? -, AvailableMemoryMB=2219 (was 1534) - AvailableMemoryMB LEAK? - 2024-11-21T04:25:00,454 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-11-21T04:25:00,474 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=812, OpenFileDescriptor=819, MaxFileDescriptor=1048576, SystemLoadAverage=453, ProcessCount=18, AvailableMemoryMB=2218 2024-11-21T04:25:00,474 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=812 is superior to 500 2024-11-21T04:25:00,476 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:25:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-21T04:25:00,478 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:25:00,478 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:25:00,478 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 175 2024-11-21T04:25:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-21T04:25:00,479 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=175, 
state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:25:00,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742214_1390 (size=404) 2024-11-21T04:25:00,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742214_1390 (size=404) 2024-11-21T04:25:00,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742214_1390 (size=404) 2024-11-21T04:25:00,503 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d6599fc11fc065b339177b6c0b07cb1c, NAME => 'testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:25:00,505 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 445ec8afc5baaba6cc3771e00cfb74bb, NAME => 'testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:25:00,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742215_1391 (size=65) 2024-11-21T04:25:00,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742215_1391 (size=65) 2024-11-21T04:25:00,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742215_1391 (size=65) 2024-11-21T04:25:00,535 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:25:00,535 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing d6599fc11fc065b339177b6c0b07cb1c, disabling compactions & flushes 2024-11-21T04:25:00,535 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] 
regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:00,535 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:00,535 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. after waiting 0 ms 2024-11-21T04:25:00,535 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:00,535 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:00,535 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for d6599fc11fc065b339177b6c0b07cb1c: Waiting for close lock at 1732163100535Disabling compacts and flushes for region at 1732163100535Disabling writes for close at 1732163100535Writing region close event to WAL at 1732163100535Closed at 1732163100535 2024-11-21T04:25:00,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742216_1392 (size=65) 2024-11-21T04:25:00,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742216_1392 (size=65) 2024-11-21T04:25:00,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742216_1392 (size=65) 2024-11-21T04:25:00,547 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:25:00,547 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 445ec8afc5baaba6cc3771e00cfb74bb, disabling compactions & flushes 2024-11-21T04:25:00,547 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 2024-11-21T04:25:00,547 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 2024-11-21T04:25:00,547 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. after waiting 0 ms 2024-11-21T04:25:00,548 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 
2024-11-21T04:25:00,548 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 2024-11-21T04:25:00,548 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 445ec8afc5baaba6cc3771e00cfb74bb: Waiting for close lock at 1732163100547Disabling compacts and flushes for region at 1732163100547Disabling writes for close at 1732163100548 (+1 ms)Writing region close event to WAL at 1732163100548Closed at 1732163100548 2024-11-21T04:25:00,549 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:25:00,549 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732163100549"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163100549"}]},"ts":"1732163100549"} 2024-11-21T04:25:00,550 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732163100549"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163100549"}]},"ts":"1732163100549"} 2024-11-21T04:25:00,553 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T04:25:00,553 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:25:00,554 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163100554"}]},"ts":"1732163100554"} 2024-11-21T04:25:00,555 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-21T04:25:00,556 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:25:00,557 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:25:00,557 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:25:00,557 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:25:00,557 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:25:00,557 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:25:00,557 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:25:00,557 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:25:00,557 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:25:00,557 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:25:00,557 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 
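For reference, the CreateTableProcedure trace above (CREATE_TABLE_PRE_OPERATION, CREATE_TABLE_WRITE_FS_LAYOUT, the pre-create/close of regions d6599fc11fc065b339177b6c0b07cb1c and 445ec8afc5baaba6cc3771e00cfb74bb, CREATE_TABLE_ADD_TO_META) is the master-side work behind a single client create-table request. Below is a minimal sketch of an equivalent client call, assuming a reachable cluster; the table name, the 'cf' family, and the single split key '1' are taken from the log, while the class name and connection setup are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table and family names as echoed by HMaster in the create-table log entry above.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // A single split key '1' yields the two regions ('' -> '1') and ('1' -> '')
      // that RegionOpenAndInit initializes in the log.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}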
2024-11-21T04:25:00,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d6599fc11fc065b339177b6c0b07cb1c, ASSIGN}, {pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=445ec8afc5baaba6cc3771e00cfb74bb, ASSIGN}] 2024-11-21T04:25:00,559 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=445ec8afc5baaba6cc3771e00cfb74bb, ASSIGN 2024-11-21T04:25:00,559 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d6599fc11fc065b339177b6c0b07cb1c, ASSIGN 2024-11-21T04:25:00,560 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=445ec8afc5baaba6cc3771e00cfb74bb, ASSIGN; state=OFFLINE, location=fdc0a393186c,34363,1732162938185; forceNewPlan=false, retain=false 2024-11-21T04:25:00,561 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d6599fc11fc065b339177b6c0b07cb1c, ASSIGN; state=OFFLINE, location=fdc0a393186c,42393,1732162938268; forceNewPlan=false, retain=false 2024-11-21T04:25:00,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-21T04:25:00,711 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
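The two TransitRegionStateProcedure entries above plan the new regions onto fdc0a393186c,42393,1732162938268 and fdc0a393186c,34363,1732162938185. A hedged sketch of how a client could list where each region actually landed once assignment completes, using the public RegionLocator API; only the table name comes from the log, the class name and output format are illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocations {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Each HRegionLocation pairs a RegionInfo with the server currently hosting it,
      // i.e. the regionLocation values that RegionStateStore writes into hbase:meta.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}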
2024-11-21T04:25:00,711 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=d6599fc11fc065b339177b6c0b07cb1c, regionState=OPENING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:25:00,711 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=445ec8afc5baaba6cc3771e00cfb74bb, regionState=OPENING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:25:00,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d6599fc11fc065b339177b6c0b07cb1c, ASSIGN because future has completed 2024-11-21T04:25:00,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure d6599fc11fc065b339177b6c0b07cb1c, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:25:00,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=445ec8afc5baaba6cc3771e00cfb74bb, ASSIGN because future has completed 2024-11-21T04:25:00,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=179, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:25:00,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-21T04:25:00,872 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:00,872 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7752): Opening region: {ENCODED => d6599fc11fc065b339177b6c0b07cb1c, NAME => 'testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T04:25:00,873 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. service=AccessControlService 2024-11-21T04:25:00,873 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
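The "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" line above reflects the secure cluster configuration used by this test suite. A rough sketch of the standard properties that load that coprocessor, assuming a simple authorization-only setup; the property keys are the documented ones, while the helper class is illustrative and omits the Kerberos settings a fully secure deployment would also need.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureClusterConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Enable authorization checks and register the AccessController coprocessor
    // on master, region servers, and regions, matching the coprocessor-load lines above.
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}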
2024-11-21T04:25:00,873 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:00,873 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:25:00,873 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7794): checking encryption for d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:00,873 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7797): checking classloading for d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:00,883 INFO [StoreOpener-d6599fc11fc065b339177b6c0b07cb1c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:00,886 INFO [StoreOpener-d6599fc11fc065b339177b6c0b07cb1c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d6599fc11fc065b339177b6c0b07cb1c columnFamilyName cf 2024-11-21T04:25:00,886 DEBUG [StoreOpener-d6599fc11fc065b339177b6c0b07cb1c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:25:00,887 INFO [StoreOpener-d6599fc11fc065b339177b6c0b07cb1c-1 {}] regionserver.HStore(327): Store=d6599fc11fc065b339177b6c0b07cb1c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:25:00,888 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1038): replaying wal for d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:00,889 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:00,889 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:00,889 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1048): stopping wal replay for d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:00,889 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1060): Cleaning up temporary data for d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:00,891 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1093): writing seq id for d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:00,896 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:25:00,896 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1114): Opened d6599fc11fc065b339177b6c0b07cb1c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59621313, jitterRate=-0.11157320439815521}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:25:00,896 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:00,897 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1006): Region open journal for d6599fc11fc065b339177b6c0b07cb1c: Running coprocessor pre-open hook at 1732163100873Writing region info on filesystem at 1732163100873Initializing all the Stores at 1732163100874 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163100874Cleaning up temporary data from old regions at 1732163100889 (+15 ms)Running coprocessor post-open hooks at 1732163100896 (+7 ms)Region opened successfully at 1732163100897 (+1 ms) 2024-11-21T04:25:00,898 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c., pid=178, masterSystemTime=1732163100868 2024-11-21T04:25:00,902 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=d6599fc11fc065b339177b6c0b07cb1c, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:25:00,906 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 
2024-11-21T04:25:00,906 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:00,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=178, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure d6599fc11fc065b339177b6c0b07cb1c, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:25:00,909 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 2024-11-21T04:25:00,910 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7752): Opening region: {ENCODED => 445ec8afc5baaba6cc3771e00cfb74bb, NAME => 'testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T04:25:00,910 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. service=AccessControlService 2024-11-21T04:25:00,910 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-21T04:25:00,910 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:00,910 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:25:00,910 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7794): checking encryption for 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:00,910 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7797): checking classloading for 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:00,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=176 2024-11-21T04:25:00,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=176, state=SUCCESS, hasLock=false; OpenRegionProcedure d6599fc11fc065b339177b6c0b07cb1c, server=fdc0a393186c,42393,1732162938268 in 193 msec 2024-11-21T04:25:00,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d6599fc11fc065b339177b6c0b07cb1c, ASSIGN in 357 msec 2024-11-21T04:25:00,924 INFO [StoreOpener-445ec8afc5baaba6cc3771e00cfb74bb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:00,925 INFO [StoreOpener-445ec8afc5baaba6cc3771e00cfb74bb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 445ec8afc5baaba6cc3771e00cfb74bb columnFamilyName cf 2024-11-21T04:25:00,925 DEBUG [StoreOpener-445ec8afc5baaba6cc3771e00cfb74bb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:25:00,926 INFO [StoreOpener-445ec8afc5baaba6cc3771e00cfb74bb-1 {}] regionserver.HStore(327): Store=445ec8afc5baaba6cc3771e00cfb74bb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:25:00,926 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1038): replaying wal for 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:00,927 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:00,927 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:00,928 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1048): stopping wal replay for 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:00,928 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1060): Cleaning up temporary data for 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:00,930 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1093): writing seq id for 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:00,943 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:25:00,944 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1114): Opened 445ec8afc5baaba6cc3771e00cfb74bb; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62918920, jitterRate=-0.062435030937194824}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:25:00,944 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:00,944 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1006): Region open journal for 445ec8afc5baaba6cc3771e00cfb74bb: Running coprocessor pre-open hook at 1732163100911Writing region info on filesystem at 1732163100911Initializing all the Stores at 1732163100919 (+8 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163100919Cleaning up temporary data from old regions at 1732163100928 (+9 ms)Running coprocessor post-open hooks at 1732163100944 (+16 ms)Region opened successfully at 1732163100944 2024-11-21T04:25:00,945 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb., pid=179, masterSystemTime=1732163100877 2024-11-21T04:25:00,948 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 2024-11-21T04:25:00,948 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 
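With both regions of testtb-testExportWithChecksum now open, the testExportWithChecksum case can snapshot the table and run the ExportSnapshot tool with checksum verification. A hedged sketch of driving that tool programmatically; -snapshot and -copy-to are the tool's documented arguments, the snapshot name matches the log, and the destination path is invented purely for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest and referenced HFiles to the target filesystem;
    // file checksums are verified during the copy unless verification is turned off.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "hdfs://localhost:44067/user/jenkins/export-test"  // illustrative target
    });
    System.exit(rc);
  }
}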
2024-11-21T04:25:00,949 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=445ec8afc5baaba6cc3771e00cfb74bb, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:25:00,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=179, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:25:00,954 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-11-21T04:25:00,954 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; OpenRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb, server=fdc0a393186c,34363,1732162938185 in 235 msec 2024-11-21T04:25:00,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=177, resume processing ppid=175 2024-11-21T04:25:00,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=445ec8afc5baaba6cc3771e00cfb74bb, ASSIGN in 397 msec 2024-11-21T04:25:00,959 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:25:00,959 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163100959"}]},"ts":"1732163100959"} 2024-11-21T04:25:00,962 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-21T04:25:00,963 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:25:00,963 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-21T04:25:00,968 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-21T04:25:01,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:01,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:01,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:01,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:01,061 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:01,061 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:01,061 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:01,062 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:01,062 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:01,062 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:01,064 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:01,064 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:01,065 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 583 msec 2024-11-21T04:25:01,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-21T04:25:01,102 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-21T04:25:01,102 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-11-21T04:25:01,103 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:25:01,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 
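The snapshot request logged a few entries below ({ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }) corresponds to an ordinary Admin.snapshot call issued after the assignment wait. A minimal sketch, assuming a reachable cluster; the snapshot and table names come from the log, while the polling loop and class name are illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Block until the master reports the table's regions as available,
      // mirroring the HBaseTestingUtil wait shown in the log.
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);
      }
      // FLUSH-type snapshot, the default for Admin.snapshot(name, table).
      admin.snapshot("emptySnaptb0-testExportWithChecksum", table);
    }
  }
}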
2024-11-21T04:25:01,109 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:25:01,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-11-21T04:25:01,109 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-21T04:25:01,113 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-21T04:25:01,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163101113 (current time:1732163101113). 2024-11-21T04:25:01,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:25:01,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-21T04:25:01,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:25:01,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e9269b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:25:01,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:25:01,116 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:25:01,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:25:01,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:25:01,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798b60d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:25:01,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientMetaService, sasl=false 2024-11-21T04:25:01,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,118 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34164, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:25:01,119 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d5f28eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:25:01,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:25:01,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:01,121 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52440, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:25:01,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:25:01,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:25:01,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5765c717, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:25:01,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:25:01,125 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:25:01,126 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:25:01,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:25:01,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:25:01,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63d6f1c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:25:01,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:25:01,139 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:25:01,140 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23ac155f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:25:01,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:25:01,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:01,144 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52444, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:25:01,146 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:01,146 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:01,148 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38128, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:25:01,149 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:25:01,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:25:01,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-21T04:25:01,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:25:01,151 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:25:01,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-21T04:25:01,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-21T04:25:01,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-21T04:25:01,155 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:25:01,157 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:25:01,162 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:25:01,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742217_1393 (size=161) 2024-11-21T04:25:01,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742217_1393 (size=161) 2024-11-21T04:25:01,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742217_1393 (size=161) 2024-11-21T04:25:01,207 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH 
ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:25:01,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6599fc11fc065b339177b6c0b07cb1c}, {pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb}] 2024-11-21T04:25:01,208 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:01,208 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:01,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-21T04:25:01,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=182 2024-11-21T04:25:01,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 2024-11-21T04:25:01,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=181 2024-11-21T04:25:01,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2603): Flush status journal for 445ec8afc5baaba6cc3771e00cfb74bb: 2024-11-21T04:25:01,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. for emptySnaptb0-testExportWithChecksum completed. 2024-11-21T04:25:01,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-21T04:25:01,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:25:01,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:25:01,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 
2024-11-21T04:25:01,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2603): Flush status journal for d6599fc11fc065b339177b6c0b07cb1c: 2024-11-21T04:25:01,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. for emptySnaptb0-testExportWithChecksum completed. 2024-11-21T04:25:01,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-21T04:25:01,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:25:01,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:25:01,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742218_1394 (size=68) 2024-11-21T04:25:01,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742218_1394 (size=68) 2024-11-21T04:25:01,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742218_1394 (size=68) 2024-11-21T04:25:01,408 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 
2024-11-21T04:25:01,408 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-21T04:25:01,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=182 2024-11-21T04:25:01,408 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:01,408 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:01,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742219_1395 (size=68) 2024-11-21T04:25:01,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742219_1395 (size=68) 2024-11-21T04:25:01,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742219_1395 (size=68) 2024-11-21T04:25:01,413 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:01,413 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=181 2024-11-21T04:25:01,414 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb in 204 msec 2024-11-21T04:25:01,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=181 2024-11-21T04:25:01,414 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:01,414 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:01,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-11-21T04:25:01,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d6599fc11fc065b339177b6c0b07cb1c in 208 msec 2024-11-21T04:25:01,417 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:25:01,417 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ 
ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:25:01,418 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:25:01,418 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-21T04:25:01,418 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-21T04:25:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742220_1396 (size=543) 2024-11-21T04:25:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742220_1396 (size=543) 2024-11-21T04:25:01,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742220_1396 (size=543) 2024-11-21T04:25:01,438 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:25:01,447 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:25:01,448 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-21T04:25:01,449 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:25:01,449 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-21T04:25:01,450 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 298 msec 2024-11-21T04:25:01,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-21T04:25:01,472 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-21T04:25:01,481 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='0f74ca8bb796f5d65d594ab291cfb1d20', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:01,482 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='286390eb7f72d43602f84afb0fc5127d7', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:25:01,483 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='18f6e26e5868dfff15af82dc83a1bf3c4', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:25:01,483 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='3651f54d07801b772b7e9b4c614a37c69', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:25:01,484 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='4964dfb5e04a913a9f970051ed6bedcbe', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:25:01,485 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='63a326a84807f29c9a8ebe40fde63da61', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:25:01,485 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='5323620b7dccc4aabeb1e75a79f2ac392', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:25:01,486 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='4ed61de4a8f3c493c22e54699d152a9c', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb., hostname=fdc0a393186c,34363,1732162938185, seqNum=2] 2024-11-21T04:25:01,493 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42393 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. with WAL disabled. Data may be lost in the event of a crash. 
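The entry just above (and the matching one that follows for the second region) records the test writing rows to testtb-testExportWithChecksum with the WAL disabled, which is why the region server warns that data may be lost on a crash. As an illustration only, here is a minimal sketch of issuing such a put through the public HBase client API; the column family 'cf' and qualifier 'q' match the cf:q cells seen later in the log, while the class name, row key, and value are made up for the example:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
      Put put = new Put(Bytes.toBytes("example-row"));                 // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),           // cf:q as in the log's cells
                    Bytes.toBytes("example-value"));                   // hypothetical value
      // Skip the write-ahead log; the region server then logs the same
      // "Data may be lost in the event of a crash" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```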
2024-11-21T04:25:01,495 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34363 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:25:01,503 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-21T04:25:01,507 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-21T04:25:01,508 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:01,508 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:25:01,510 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-21T04:25:01,522 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-21T04:25:01,542 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-21T04:25:01,548 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-21T04:25:01,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163101548 (current time:1732163101548). 
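The last entries above show the master receiving the client's snapshot request ({ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }) and filling in the defaults for creation time, version, and owner. The RawAsyncHBaseAdmin lines earlier indicate the test drives this through the admin API; a minimal, hedged sketch of the client-side call that produces such a request (for an enabled table this is normally recorded as the FLUSH-type snapshot seen here; the class name is made up):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure completes; while it waits,
      // the client polls the master, matching the repeated
      // "Checking to see if procedure is done" entries in the log.
      admin.snapshot("snaptb0-testExportWithChecksum",
                     TableName.valueOf("testtb-testExportWithChecksum"));
    }
  }
}
```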
2024-11-21T04:25:01,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:25:01,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-21T04:25:01,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:25:01,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b9a01cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:25:01,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:25:01,556 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:25:01,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:25:01,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:25:01,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f10cb16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:25:01,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:25:01,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,558 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34198, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:25:01,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c5b45ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:25:01,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:25:01,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:01,561 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52456, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:25:01,563 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:25:01,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:25:01,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,566 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T04:25:01,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f877861, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:25:01,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:25:01,584 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:25:01,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:25:01,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:25:01,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59d23594, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:25:01,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:25:01,586 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,587 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34216, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:25:01,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56bfcffe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:01,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:25:01,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:25:01,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:01,591 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52458, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
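The ClusterIdFetcher / ConnectionRegistryService / ClientMetaService exchanges above, together with the AsyncConnectionImpl close stacks, are the footprint of short-lived connections opened while SnapshotDescriptionUtils validates the snapshot's ACLs. The call that triggers this handshake is ordinary connection setup; a minimal sketch under the assumption of a default client configuration (class name made up):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectionSetupExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The registry lookups logged above resolve the cluster id
    // ('572b726e-d928-426e-9114-1cd37decf96c' in this run) and the meta
    // region location; they happen lazily when the connection is first used.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
```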
2024-11-21T04:25:01,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:01,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:01,595 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38132, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:25:01,596 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:25:01,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:25:01,596 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:01,597 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:25:01,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-21T04:25:01,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:25:01,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-21T04:25:01,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-21T04:25:01,601 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:25:01,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-21T04:25:01,602 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:25:01,605 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:25:01,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742221_1397 (size=156) 2024-11-21T04:25:01,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742221_1397 (size=156) 2024-11-21T04:25:01,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742221_1397 (size=156) 2024-11-21T04:25:01,651 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:25:01,652 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6599fc11fc065b339177b6c0b07cb1c}, {pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb}] 2024-11-21T04:25:01,653 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:01,653 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:01,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-21T04:25:01,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=184 2024-11-21T04:25:01,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:01,807 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2902): Flushing d6599fc11fc065b339177b6c0b07cb1c 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-21T04:25:01,807 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34363 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=185 2024-11-21T04:25:01,807 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 
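At this point each region server flushes the memstore of the region it is about to snapshot (the HRegion "Flushing ... column families" entries here and just below), since this is a FLUSH-type snapshot. The snapshot procedure drives that flush itself; purely as a point of comparison, the same kind of flush can also be requested by a client, roughly as follows (class name made up):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks every region server hosting a region of the table to flush its
      // memstore to an HFile, the per-region flush the log records here.
      admin.flush(TableName.valueOf("testtb-testExportWithChecksum"));
    }
  }
}
```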
2024-11-21T04:25:01,807 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2902): Flushing 445ec8afc5baaba6cc3771e00cfb74bb 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-21T04:25:01,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/.tmp/cf/983ec68adf534945b6b4fb41dc2edf47 is 71, key is 015e7bf11b6f641205fd85462e7e066a/cf:q/1732163101493/Put/seqid=0 2024-11-21T04:25:01,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/.tmp/cf/34d83094789d420a8248c2adff2a99f7 is 71, key is 1968a6442f03dbbea8eaf87033346b9b/cf:q/1732163101494/Put/seqid=0 2024-11-21T04:25:01,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742222_1398 (size=5490) 2024-11-21T04:25:01,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742222_1398 (size=5490) 2024-11-21T04:25:01,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742222_1398 (size=5490) 2024-11-21T04:25:01,836 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/.tmp/cf/983ec68adf534945b6b4fb41dc2edf47 2024-11-21T04:25:01,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742223_1399 (size=8122) 2024-11-21T04:25:01,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742223_1399 (size=8122) 2024-11-21T04:25:01,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742223_1399 (size=8122) 2024-11-21T04:25:01,842 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/.tmp/cf/34d83094789d420a8248c2adff2a99f7 2024-11-21T04:25:01,843 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/.tmp/cf/983ec68adf534945b6b4fb41dc2edf47 as 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/cf/983ec68adf534945b6b4fb41dc2edf47 2024-11-21T04:25:01,847 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/cf/983ec68adf534945b6b4fb41dc2edf47, entries=6, sequenceid=6, filesize=5.4 K 2024-11-21T04:25:01,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/.tmp/cf/34d83094789d420a8248c2adff2a99f7 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7 2024-11-21T04:25:01,848 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for d6599fc11fc065b339177b6c0b07cb1c in 42ms, sequenceid=6, compaction requested=false 2024-11-21T04:25:01,848 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-21T04:25:01,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2603): Flush status journal for d6599fc11fc065b339177b6c0b07cb1c: 2024-11-21T04:25:01,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. for snaptb0-testExportWithChecksum completed. 2024-11-21T04:25:01,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-21T04:25:01,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:25:01,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/cf/983ec68adf534945b6b4fb41dc2edf47] hfiles 2024-11-21T04:25:01,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/cf/983ec68adf534945b6b4fb41dc2edf47 for snapshot=snaptb0-testExportWithChecksum 2024-11-21T04:25:01,853 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7, entries=44, sequenceid=6, filesize=7.9 K 2024-11-21T04:25:01,854 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 445ec8afc5baaba6cc3771e00cfb74bb in 47ms, sequenceid=6, compaction requested=false 2024-11-21T04:25:01,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2603): Flush status journal for 445ec8afc5baaba6cc3771e00cfb74bb: 2024-11-21T04:25:01,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. for snaptb0-testExportWithChecksum completed. 2024-11-21T04:25:01,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-21T04:25:01,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:25:01,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7] hfiles 2024-11-21T04:25:01,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7 for snapshot=snaptb0-testExportWithChecksum 2024-11-21T04:25:01,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742224_1400 (size=107) 2024-11-21T04:25:01,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742224_1400 (size=107) 2024-11-21T04:25:01,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742224_1400 (size=107) 2024-11-21T04:25:01,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 
2024-11-21T04:25:01,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-21T04:25:01,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=184 2024-11-21T04:25:01,857 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:01,857 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:01,861 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d6599fc11fc065b339177b6c0b07cb1c in 207 msec 2024-11-21T04:25:01,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742225_1401 (size=107) 2024-11-21T04:25:01,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742225_1401 (size=107) 2024-11-21T04:25:01,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742225_1401 (size=107) 2024-11-21T04:25:01,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 
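A few entries further on, the SnapshotProcedure consolidates and verifies the manifest and moves the snapshot out of the .hbase-snapshot/.tmp directory, after which it is visible to clients. A hedged sketch of confirming that through the public Admin API, in the same vein as the examples above (class name made up):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After this part of the log completes, both emptySnaptb0-testExportWithChecksum
      // and snaptb0-testExportWithChecksum should appear in the listing.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " on " + sd.getTableName());
      }
    }
  }
}
```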
2024-11-21T04:25:01,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=185 2024-11-21T04:25:01,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=185 2024-11-21T04:25:01,871 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:01,871 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:01,874 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=185, resume processing ppid=183 2024-11-21T04:25:01,874 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb in 221 msec 2024-11-21T04:25:01,874 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:25:01,875 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:25:01,875 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:25:01,876 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-21T04:25:01,876 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-21T04:25:01,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742226_1402 (size=621) 2024-11-21T04:25:01,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742226_1402 (size=621) 2024-11-21T04:25:01,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742226_1402 (size=621) 2024-11-21T04:25:01,888 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:25:01,894 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:25:01,894 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-21T04:25:01,896 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:25:01,896 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-21T04:25:01,897 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 298 msec 2024-11-21T04:25:01,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-21T04:25:01,922 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-21T04:25:01,922 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163101922 2024-11-21T04:25:01,922 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163101922, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163101922, srcFsUri=hdfs://localhost:44067, srcDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:25:01,960 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44067, inputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:25:01,960 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@fac06ab, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163101922, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163101922/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-21T04:25:01,961 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T04:25:01,965 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163101922/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-21T04:25:01,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:01,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:01,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:02,935 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-17864580873092013409.jar 2024-11-21T04:25:02,935 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:02,935 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:02,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-11529068772465355108.jar 2024-11-21T04:25:02,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:02,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:02,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): 
For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:02,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:02,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:02,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:02,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T04:25:02,997 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T04:25:02,997 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T04:25:02,997 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T04:25:02,997 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T04:25:02,998 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T04:25:02,998 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T04:25:02,998 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 
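[Editor's note] The DEBUG lines from mapreduce.TableMapReduceUtil(972) above record, for each class the export's copy job needs, which jar will be shipped with the MapReduce job. A minimal sketch of the equivalent call for a hand-built job is shown below; it is an assumption-labelled illustration (the class name and job name are made up), not the ExportSnapshot source:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            Job job = Job.getInstance(conf, "snapshot-export-sketch"); // job name is illustrative
            // Resolves the jar containing each key/value/format class plus the HBase
            // internals and adds them to the job's distributed cache; this step is
            // what produces the "For class X, using jar Y" lines above.
            TableMapReduceUtil.addDependencyJars(job);
        }
    }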
2024-11-21T04:25:02,998 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T04:25:02,998 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T04:25:02,999 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T04:25:02,999 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:25:02,999 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:25:02,999 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:25:02,999 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:25:03,000 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:25:03,000 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:25:03,000 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:25:03,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742227_1403 (size=131440) 2024-11-21T04:25:03,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742227_1403 (size=131440) 2024-11-21T04:25:03,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is 
added to blk_1073742227_1403 (size=131440) 2024-11-21T04:25:03,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742228_1404 (size=4188619) 2024-11-21T04:25:03,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742228_1404 (size=4188619) 2024-11-21T04:25:03,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742228_1404 (size=4188619) 2024-11-21T04:25:03,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742229_1405 (size=1323991) 2024-11-21T04:25:03,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742229_1405 (size=1323991) 2024-11-21T04:25:03,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742229_1405 (size=1323991) 2024-11-21T04:25:03,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742230_1406 (size=903734) 2024-11-21T04:25:03,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742230_1406 (size=903734) 2024-11-21T04:25:03,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742230_1406 (size=903734) 2024-11-21T04:25:03,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742231_1407 (size=8360083) 2024-11-21T04:25:03,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742231_1407 (size=8360083) 2024-11-21T04:25:03,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742231_1407 (size=8360083) 2024-11-21T04:25:03,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742232_1408 (size=1877034) 2024-11-21T04:25:03,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742232_1408 (size=1877034) 2024-11-21T04:25:03,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742232_1408 (size=1877034) 2024-11-21T04:25:03,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742233_1409 (size=77835) 2024-11-21T04:25:03,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742233_1409 (size=77835) 2024-11-21T04:25:03,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742233_1409 (size=77835) 2024-11-21T04:25:03,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742234_1410 (size=30949) 2024-11-21T04:25:03,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33643 is added to blk_1073742234_1410 (size=30949) 2024-11-21T04:25:03,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742234_1410 (size=30949) 2024-11-21T04:25:03,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742235_1411 (size=1597327) 2024-11-21T04:25:03,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742235_1411 (size=1597327) 2024-11-21T04:25:03,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742235_1411 (size=1597327) 2024-11-21T04:25:03,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742236_1412 (size=4695811) 2024-11-21T04:25:03,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742236_1412 (size=4695811) 2024-11-21T04:25:03,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742236_1412 (size=4695811) 2024-11-21T04:25:03,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742237_1413 (size=232957) 2024-11-21T04:25:03,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742237_1413 (size=232957) 2024-11-21T04:25:03,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742237_1413 (size=232957) 2024-11-21T04:25:03,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742238_1414 (size=127628) 2024-11-21T04:25:03,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742238_1414 (size=127628) 2024-11-21T04:25:03,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742238_1414 (size=127628) 2024-11-21T04:25:03,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742239_1415 (size=6424740) 2024-11-21T04:25:03,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742239_1415 (size=6424740) 2024-11-21T04:25:03,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742239_1415 (size=6424740) 2024-11-21T04:25:03,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742240_1416 (size=20406) 2024-11-21T04:25:03,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742240_1416 (size=20406) 2024-11-21T04:25:03,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742240_1416 (size=20406) 2024-11-21T04:25:03,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33643 is added to blk_1073742241_1417 (size=5175431) 2024-11-21T04:25:03,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742241_1417 (size=5175431) 2024-11-21T04:25:03,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742241_1417 (size=5175431) 2024-11-21T04:25:03,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742242_1418 (size=217634) 2024-11-21T04:25:03,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742242_1418 (size=217634) 2024-11-21T04:25:03,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742242_1418 (size=217634) 2024-11-21T04:25:03,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742243_1419 (size=1832290) 2024-11-21T04:25:03,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742243_1419 (size=1832290) 2024-11-21T04:25:03,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742243_1419 (size=1832290) 2024-11-21T04:25:03,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742244_1420 (size=322274) 2024-11-21T04:25:03,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742244_1420 (size=322274) 2024-11-21T04:25:03,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742244_1420 (size=322274) 2024-11-21T04:25:03,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742245_1421 (size=440656) 2024-11-21T04:25:03,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742245_1421 (size=440656) 2024-11-21T04:25:03,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742245_1421 (size=440656) 2024-11-21T04:25:03,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742246_1422 (size=503880) 2024-11-21T04:25:03,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742246_1422 (size=503880) 2024-11-21T04:25:03,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742246_1422 (size=503880) 2024-11-21T04:25:03,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742247_1423 (size=29229) 2024-11-21T04:25:03,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742247_1423 (size=29229) 2024-11-21T04:25:03,268 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742247_1423 (size=29229) 2024-11-21T04:25:03,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742248_1424 (size=24096) 2024-11-21T04:25:03,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742248_1424 (size=24096) 2024-11-21T04:25:03,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742248_1424 (size=24096) 2024-11-21T04:25:03,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742249_1425 (size=111872) 2024-11-21T04:25:03,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742249_1425 (size=111872) 2024-11-21T04:25:03,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742249_1425 (size=111872) 2024-11-21T04:25:03,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742250_1426 (size=45609) 2024-11-21T04:25:03,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742250_1426 (size=45609) 2024-11-21T04:25:03,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742250_1426 (size=45609) 2024-11-21T04:25:03,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742251_1427 (size=136454) 2024-11-21T04:25:03,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742251_1427 (size=136454) 2024-11-21T04:25:03,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742251_1427 (size=136454) 2024-11-21T04:25:03,300 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
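[Editor's note] The JobResourceUploader warning just above is logged while the copy job for the local file:/ export destination is being prepared. ExportSnapshot is a Hadoop Tool, so the run that follows (loading the snapshot hfile list, computing the export split, submitting the job) can be driven through ToolRunner; the sketch below is illustrative only, the destination path is a hypothetical stand-in for the long test path, and -snapshot/-copy-to are the standard ExportSnapshot options:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class LocalExportSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Export the snapshot taken above to a local (file:) destination,
            // mirroring the local-export run in the log. Path is hypothetical.
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "-snapshot", "snaptb0-testExportWithChecksum",
                "-copy-to", "file:///tmp/local-export-snaptb0-testExportWithChecksum"
            });
            System.exit(rc);
        }
    }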
2024-11-21T04:25:03,302 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-21T04:25:03,303 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T04:25:03,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742252_1428 (size=338) 2024-11-21T04:25:03,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742252_1428 (size=338) 2024-11-21T04:25:03,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742252_1428 (size=338) 2024-11-21T04:25:03,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742253_1429 (size=15) 2024-11-21T04:25:03,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742253_1429 (size=15) 2024-11-21T04:25:03,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742253_1429 (size=15) 2024-11-21T04:25:03,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742254_1430 (size=303780) 2024-11-21T04:25:03,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742254_1430 (size=303780) 2024-11-21T04:25:03,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742254_1430 (size=303780) 2024-11-21T04:25:04,892 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T04:25:04,892 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T04:25:04,895 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0007_000001 (auth:SIMPLE) from 127.0.0.1:40216 2024-11-21T04:25:04,907 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0007/container_1732162945431_0007_01_000001/launch_container.sh] 2024-11-21T04:25:04,907 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0007/container_1732162945431_0007_01_000001/container_tokens] 2024-11-21T04:25:04,907 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0007/container_1732162945431_0007_01_000001/sysfs] 2024-11-21T04:25:05,310 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0008_000001 (auth:SIMPLE) from 127.0.0.1:34850 2024-11-21T04:25:05,707 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:25:07,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-21T04:25:07,560 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-21T04:25:07,561 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-21T04:25:10,535 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0008_000001 (auth:SIMPLE) from 127.0.0.1:39468 2024-11-21T04:25:10,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742255_1431 (size=349430) 2024-11-21T04:25:10,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742255_1431 (size=349430) 2024-11-21T04:25:10,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742255_1431 (size=349430) 2024-11-21T04:25:12,822 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0008_000001 (auth:SIMPLE) from 127.0.0.1:50734 2024-11-21T04:25:13,063 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate 
configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:25:16,350 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T04:25:17,314 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000002/launch_container.sh] 2024-11-21T04:25:17,314 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000002/container_tokens] 2024-11-21T04:25:17,314 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163101922/archive/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-21T04:25:18,656 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0008_000001 (auth:SIMPLE) from 127.0.0.1:50750 2024-11-21T04:25:20,116 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fa291fa3994bf7902442d1d129cd311c, had cached 0 bytes from a total of 8258 2024-11-21T04:25:20,117 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fef81f9296f7039a4489a2709be189d3, had cached 0 bytes from a total of 5356 2024-11-21T04:25:21,370 DEBUG [master/fdc0a393186c:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 445ec8afc5baaba6cc3771e00cfb74bb changed from -1.0 to 0.0, refreshing cache 2024-11-21T04:25:21,370 DEBUG [master/fdc0a393186c:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region fef81f9296f7039a4489a2709be189d3 changed from -1.0 to 0.0, refreshing cache 2024-11-21T04:25:21,370 DEBUG [master/fdc0a393186c:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region fa291fa3994bf7902442d1d129cd311c changed from -1.0 to 0.0, refreshing cache 2024-11-21T04:25:21,370 DEBUG [master/fdc0a393186c:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region d6599fc11fc065b339177b6c0b07cb1c changed from -1.0 to 0.0, refreshing cache 2024-11-21T04:25:22,391 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000003/launch_container.sh] 2024-11-21T04:25:22,391 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000003/container_tokens] 2024-11-21T04:25:22,391 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163101922/archive/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-21T04:25:23,675 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0008_000001 (auth:SIMPLE) from 127.0.0.1:56732 2024-11-21T04:25:27,471 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_2/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000004/launch_container.sh] 2024-11-21T04:25:27,471 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_2/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000004/container_tokens] 2024-11-21T04:25:27,471 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_2/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/local-export-1732163101922/archive/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-21T04:25:28,687 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0008_000001 (auth:SIMPLE) from 127.0.0.1:36448 2024-11-21T04:25:32,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742256_1432 (size=21330) 2024-11-21T04:25:32,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742256_1432 (size=21330) 2024-11-21T04:25:32,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742256_1432 (size=21330) 2024-11-21T04:25:32,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742257_1433 (size=460) 2024-11-21T04:25:32,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742257_1433 (size=460) 2024-11-21T04:25:32,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742257_1433 (size=460) 2024-11-21T04:25:32,411 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000005/launch_container.sh]
2024-11-21T04:25:32,411 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000005/container_tokens]
2024-11-21T04:25:32,412 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_2/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000005/sysfs]
2024-11-21T04:25:32,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742258_1434 (size=21330)
2024-11-21T04:25:32,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742258_1434 (size=21330)
2024-11-21T04:25:32,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742258_1434 (size=21330)
2024-11-21T04:25:32,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742259_1435 (size=349430)
2024-11-21T04:25:32,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742259_1435 (size=349430)
2024-11-21T04:25:32,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742259_1435 (size=349430)
2024-11-21T04:25:32,451 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0008_000001 (auth:SIMPLE) from 127.0.0.1:32884
2024-11-21T04:25:33,609 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1230): Snapshot export failed
org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1732162945431_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:938) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1207) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:352) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
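[Editor's note] The export to the local filesystem failed in ExportMapper.verifyCopyResult because the HDFS source and the LocalFileSystem destination report checksums that are not directly comparable, and the repeated IOException text above itself names two ways forward. The sketch below is an illustration of those suggestions rather than the test's code: the destination path is hypothetical, and setting dfs.checksum.combine.mode in the Configuration is the programmatic equivalent of the -Ddfs.checksum.combine.mode=COMPOSITE_CRC option quoted in the message.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ChecksumWorkaroundSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Option named in the error message: file-level CRCs that stay comparable
            // when block sizes or filesystem types differ.
            conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "-snapshot", "snaptb0-testExportWithChecksum",
                "-copy-to", "file:///tmp/local-export-snaptb0-testExportWithChecksum", // hypothetical path
                "-overwrite"
                // Alternative also named in the message: add "-no-checksum-verify" to skip
                // the comparison entirely, accepting the risk of masking transfer corruption.
            });
            System.exit(rc);
        }
    }

The entries that follow show the test retrying the same export against an hdfs:// destination, where source and target are the same filesystem type and the checksum comparison is not cross-filesystem.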
2024-11-21T04:25:33,611 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163133611 2024-11-21T04:25:33,611 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44067, tgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163133611, rawTgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163133611, srcFsUri=hdfs://localhost:44067, srcDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:25:33,654 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44067, inputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:25:33,654 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163133611, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163133611/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-21T04:25:33,656 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T04:25:33,661 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163133611/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-21T04:25:33,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742260_1436 (size=156) 2024-11-21T04:25:33,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742260_1436 (size=156) 2024-11-21T04:25:33,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742260_1436 (size=156) 2024-11-21T04:25:33,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742261_1437 (size=621) 2024-11-21T04:25:33,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742261_1437 (size=621) 2024-11-21T04:25:33,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742261_1437 (size=621) 2024-11-21T04:25:34,081 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:34,082 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:34,082 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:35,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-9719045844143339320.jar 2024-11-21T04:25:35,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:35,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:35,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-1012838691820905221.jar 2024-11-21T04:25:35,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:35,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:35,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:35,373 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:35,374 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:35,374 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:35,374 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T04:25:35,374 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T04:25:35,375 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T04:25:35,375 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T04:25:35,375 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T04:25:35,376 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T04:25:35,376 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T04:25:35,376 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T04:25:35,376 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T04:25:35,377 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T04:25:35,377 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T04:25:35,377 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
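The TableMapReduceUtil(972) entries above record the export job resolving, for each referenced class, the jar that will be shipped with the MapReduce job (HBase module jars, shaded thirdparty jars, Hadoop jars). As a rough sketch, and not the test's own code, this is how a client job against HBase would typically request the same kind of dependency resolution through the public API; the job name below is a placeholder, not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class AddDependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Placeholder job name; any Job configured for an HBase MapReduce run works here.
        Job job = Job.getInstance(conf, "export-sketch");
        // Resolves the jar containing each HBase class (plus the job's own I/O classes,
        // such as LongWritable and TextInputFormat) and adds it to the job's distributed
        // cache, producing "For class X, using jar Y" resolution like the entries above.
        TableMapReduceUtil.addDependencyJars(job);
        // Without an explicit job jar the JobResourceUploader warns "No job jar file set",
        // as seen later in this log; setting one avoids that warning.
        job.setJarByClass(AddDependencyJarsSketch.class);
      }
    }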
2024-11-21T04:25:35,377 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:25:35,377 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:25:35,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:25:35,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:25:35,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:25:35,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:25:35,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742262_1438 (size=131440) 2024-11-21T04:25:35,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742262_1438 (size=131440) 2024-11-21T04:25:35,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742262_1438 (size=131440) 2024-11-21T04:25:35,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742263_1439 (size=4188619) 2024-11-21T04:25:35,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742263_1439 (size=4188619) 2024-11-21T04:25:35,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742263_1439 (size=4188619) 2024-11-21T04:25:35,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742264_1440 (size=1323991) 2024-11-21T04:25:35,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742264_1440 (size=1323991) 2024-11-21T04:25:35,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742264_1440 (size=1323991) 2024-11-21T04:25:35,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38043 is added to blk_1073742265_1441 (size=903734) 2024-11-21T04:25:35,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742265_1441 (size=903734) 2024-11-21T04:25:35,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742265_1441 (size=903734) 2024-11-21T04:25:35,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742266_1442 (size=8360083) 2024-11-21T04:25:35,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742266_1442 (size=8360083) 2024-11-21T04:25:35,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742266_1442 (size=8360083) 2024-11-21T04:25:35,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742267_1443 (size=1877034) 2024-11-21T04:25:35,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742267_1443 (size=1877034) 2024-11-21T04:25:35,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742267_1443 (size=1877034) 2024-11-21T04:25:35,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742268_1444 (size=77835) 2024-11-21T04:25:35,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742268_1444 (size=77835) 2024-11-21T04:25:35,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742268_1444 (size=77835) 2024-11-21T04:25:35,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742269_1445 (size=6424740) 2024-11-21T04:25:35,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742269_1445 (size=6424740) 2024-11-21T04:25:35,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742269_1445 (size=6424740) 2024-11-21T04:25:35,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742270_1446 (size=30949) 2024-11-21T04:25:35,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742270_1446 (size=30949) 2024-11-21T04:25:35,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742270_1446 (size=30949) 2024-11-21T04:25:35,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742271_1447 (size=1597327) 2024-11-21T04:25:35,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742271_1447 (size=1597327) 2024-11-21T04:25:35,741 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742271_1447 (size=1597327) 2024-11-21T04:25:35,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742272_1448 (size=4695811) 2024-11-21T04:25:35,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742272_1448 (size=4695811) 2024-11-21T04:25:35,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742272_1448 (size=4695811) 2024-11-21T04:25:35,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742273_1449 (size=232957) 2024-11-21T04:25:35,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742273_1449 (size=232957) 2024-11-21T04:25:35,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742273_1449 (size=232957) 2024-11-21T04:25:35,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742274_1450 (size=127628) 2024-11-21T04:25:35,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742274_1450 (size=127628) 2024-11-21T04:25:35,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742274_1450 (size=127628) 2024-11-21T04:25:35,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742275_1451 (size=20406) 2024-11-21T04:25:35,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742275_1451 (size=20406) 2024-11-21T04:25:35,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742275_1451 (size=20406) 2024-11-21T04:25:35,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742276_1452 (size=5175431) 2024-11-21T04:25:35,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742276_1452 (size=5175431) 2024-11-21T04:25:35,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742276_1452 (size=5175431) 2024-11-21T04:25:35,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742277_1453 (size=217634) 2024-11-21T04:25:35,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742277_1453 (size=217634) 2024-11-21T04:25:35,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742277_1453 (size=217634) 2024-11-21T04:25:35,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742278_1454 (size=1832290) 2024-11-21T04:25:35,896 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742278_1454 (size=1832290) 2024-11-21T04:25:35,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742278_1454 (size=1832290) 2024-11-21T04:25:35,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742279_1455 (size=322274) 2024-11-21T04:25:35,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742279_1455 (size=322274) 2024-11-21T04:25:35,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742279_1455 (size=322274) 2024-11-21T04:25:35,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742280_1456 (size=503880) 2024-11-21T04:25:35,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742280_1456 (size=503880) 2024-11-21T04:25:35,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742280_1456 (size=503880) 2024-11-21T04:25:35,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742281_1457 (size=29229) 2024-11-21T04:25:35,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742281_1457 (size=29229) 2024-11-21T04:25:35,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742281_1457 (size=29229) 2024-11-21T04:25:35,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742282_1458 (size=24096) 2024-11-21T04:25:35,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742282_1458 (size=24096) 2024-11-21T04:25:35,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742282_1458 (size=24096) 2024-11-21T04:25:35,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742283_1459 (size=111872) 2024-11-21T04:25:35,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742283_1459 (size=111872) 2024-11-21T04:25:35,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742283_1459 (size=111872) 2024-11-21T04:25:35,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742284_1460 (size=440656) 2024-11-21T04:25:35,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742284_1460 (size=440656) 2024-11-21T04:25:35,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742284_1460 (size=440656) 2024-11-21T04:25:35,975 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742285_1461 (size=45609) 2024-11-21T04:25:35,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742285_1461 (size=45609) 2024-11-21T04:25:35,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742285_1461 (size=45609) 2024-11-21T04:25:35,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742286_1462 (size=136454) 2024-11-21T04:25:35,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742286_1462 (size=136454) 2024-11-21T04:25:35,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742286_1462 (size=136454) 2024-11-21T04:25:35,997 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-21T04:25:35,999 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-21T04:25:36,003 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T04:25:36,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742287_1463 (size=338) 2024-11-21T04:25:36,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742287_1463 (size=338) 2024-11-21T04:25:36,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742287_1463 (size=338) 2024-11-21T04:25:36,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742288_1464 (size=15) 2024-11-21T04:25:36,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742288_1464 (size=15) 2024-11-21T04:25:36,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742288_1464 (size=15) 2024-11-21T04:25:36,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742289_1465 (size=303728) 2024-11-21T04:25:36,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742289_1465 (size=303728) 2024-11-21T04:25:36,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742289_1465 (size=303728) 2024-11-21T04:25:38,509 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T04:25:38,509 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T04:25:38,515 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0008_000001 (auth:SIMPLE) from 127.0.0.1:46126 2024-11-21T04:25:38,528 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_0/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000001/launch_container.sh] 2024-11-21T04:25:38,528 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_0/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000001/container_tokens] 2024-11-21T04:25:38,529 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_0/usercache/jenkins/appcache/application_1732162945431_0008/container_1732162945431_0008_01_000001/sysfs] 2024-11-21T04:25:39,397 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0009_000001 (auth:SIMPLE) from 127.0.0.1:32896 2024-11-21T04:25:44,026 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0009_000001 (auth:SIMPLE) from 127.0.0.1:46686 2024-11-21T04:25:44,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742290_1466 (size=349378) 2024-11-21T04:25:44,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742290_1466 (size=349378) 2024-11-21T04:25:44,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742290_1466 (size=349378) 2024-11-21T04:25:45,873 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d6599fc11fc065b339177b6c0b07cb1c, had cached 0 bytes from a total of 5490 2024-11-21T04:25:45,910 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 445ec8afc5baaba6cc3771e00cfb74bb, had cached 0 bytes from a total of 8122 2024-11-21T04:25:46,305 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0009_000001 (auth:SIMPLE) from 127.0.0.1:52354 2024-11-21T04:25:46,350 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
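The entries above show the ExportSnapshot MapReduce job being prepared and launched: the snapshot's hfile list is loaded, a single split of 13.3 K is created, and YARN authenticates the application attempts that run it. A minimal sketch of how such an export is normally driven follows; the snapshot name and destination URI are taken from this log, but this is a generic ToolRunner invocation, not necessarily the exact argument list the test builds:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent CLI: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot
        //                   -snapshot <name> -copy-to <hdfs-uri>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163133611"
        });
        System.exit(rc);
      }
    }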
2024-11-21T04:25:49,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742291_1467 (size=8122) 2024-11-21T04:25:49,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742291_1467 (size=8122) 2024-11-21T04:25:49,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742291_1467 (size=8122) 2024-11-21T04:25:50,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742292_1468 (size=5490) 2024-11-21T04:25:50,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742292_1468 (size=5490) 2024-11-21T04:25:50,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742292_1468 (size=5490) 2024-11-21T04:25:50,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742293_1469 (size=17413) 2024-11-21T04:25:50,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742293_1469 (size=17413) 2024-11-21T04:25:50,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742293_1469 (size=17413) 2024-11-21T04:25:50,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742294_1470 (size=462) 2024-11-21T04:25:50,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742294_1470 (size=462) 2024-11-21T04:25:50,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742294_1470 (size=462) 2024-11-21T04:25:50,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742295_1471 (size=17413) 2024-11-21T04:25:50,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742295_1471 (size=17413) 2024-11-21T04:25:50,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742295_1471 (size=17413) 2024-11-21T04:25:50,169 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_1/usercache/jenkins/appcache/application_1732162945431_0009/container_1732162945431_0009_01_000002/launch_container.sh] 2024-11-21T04:25:50,169 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_1/usercache/jenkins/appcache/application_1732162945431_0009/container_1732162945431_0009_01_000002/container_tokens] 2024-11-21T04:25:50,169 WARN [ContainersLauncher #2 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_1/usercache/jenkins/appcache/application_1732162945431_0009/container_1732162945431_0009_01_000002/sysfs] 2024-11-21T04:25:50,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742296_1472 (size=349378) 2024-11-21T04:25:50,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742296_1472 (size=349378) 2024-11-21T04:25:50,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742296_1472 (size=349378) 2024-11-21T04:25:50,190 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0009_000001 (auth:SIMPLE) from 127.0.0.1:52356 2024-11-21T04:25:51,277 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T04:25:51,278 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-21T04:25:51,283 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportWithChecksum 2024-11-21T04:25:51,283 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T04:25:51,284 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T04:25:51,284 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-21T04:25:51,284 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-21T04:25:51,284 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-21T04:25:51,284 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163133611/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163133611/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-21T04:25:51,284 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163133611/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-21T04:25:51,284 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163133611/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-21T04:25:51,289 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-11-21T04:25:51,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=186, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-21T04:25:51,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-21T04:25:51,293 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163151293"}]},"ts":"1732163151293"} 2024-11-21T04:25:51,295 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-21T04:25:51,295 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-21T04:25:51,300 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=187, ppid=186, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-21T04:25:51,302 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d6599fc11fc065b339177b6c0b07cb1c, UNASSIGN}, {pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=445ec8afc5baaba6cc3771e00cfb74bb, UNASSIGN}] 2024-11-21T04:25:51,304 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d6599fc11fc065b339177b6c0b07cb1c, UNASSIGN 2024-11-21T04:25:51,304 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=445ec8afc5baaba6cc3771e00cfb74bb, UNASSIGN 2024-11-21T04:25:51,305 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=445ec8afc5baaba6cc3771e00cfb74bb, regionState=CLOSING, regionLocation=fdc0a393186c,34363,1732162938185 2024-11-21T04:25:51,305 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=d6599fc11fc065b339177b6c0b07cb1c, regionState=CLOSING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:25:51,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d6599fc11fc065b339177b6c0b07cb1c, UNASSIGN because future has completed 2024-11-21T04:25:51,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=445ec8afc5baaba6cc3771e00cfb74bb, UNASSIGN because future has completed 2024-11-21T04:25:51,311 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:25:51,311 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:25:51,312 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure d6599fc11fc065b339177b6c0b07cb1c, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:25:51,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=191, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb, server=fdc0a393186c,34363,1732162938185}] 2024-11-21T04:25:51,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-21T04:25:51,466 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(122): Close 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:51,466 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(122): Close d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:51,466 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:25:51,466 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:25:51,466 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1722): Closing d6599fc11fc065b339177b6c0b07cb1c, disabling compactions & flushes 2024-11-21T04:25:51,466 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1722): Closing 445ec8afc5baaba6cc3771e00cfb74bb, disabling compactions & flushes 2024-11-21T04:25:51,466 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 2024-11-21T04:25:51,466 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:51,466 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:51,466 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 
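The disable sequence above (pid=186 DisableTableProcedure fanning out into CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure children) is what the master runs in response to a single client request. The test issues it through the async admin, as the "Operation: DISABLE ... completed" entry later in the log shows; an equivalent sketch using the blocking Admin API is below, with connection settings assumed rather than taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithChecksum");
          // Blocks until the master's DisableTableProcedure, including the
          // region-close child procedures logged above, has completed.
          admin.disableTable(table);
        }
      }
    }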
2024-11-21T04:25:51,467 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. after waiting 1 ms 2024-11-21T04:25:51,467 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. after waiting 0 ms 2024-11-21T04:25:51,467 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 2024-11-21T04:25:51,467 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:51,470 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:25:51,470 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:25:51,470 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:25:51,470 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:25:51,471 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c. 2024-11-21T04:25:51,471 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb. 
2024-11-21T04:25:51,471 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1676): Region close journal for d6599fc11fc065b339177b6c0b07cb1c: Waiting for close lock at 1732163151466Running coprocessor pre-close hooks at 1732163151466Disabling compacts and flushes for region at 1732163151466Disabling writes for close at 1732163151467 (+1 ms)Writing region close event to WAL at 1732163151467Running coprocessor post-close hooks at 1732163151470 (+3 ms)Closed at 1732163151471 (+1 ms) 2024-11-21T04:25:51,471 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1676): Region close journal for 445ec8afc5baaba6cc3771e00cfb74bb: Waiting for close lock at 1732163151466Running coprocessor pre-close hooks at 1732163151466Disabling compacts and flushes for region at 1732163151466Disabling writes for close at 1732163151467 (+1 ms)Writing region close event to WAL at 1732163151467Running coprocessor post-close hooks at 1732163151470 (+3 ms)Closed at 1732163151471 (+1 ms) 2024-11-21T04:25:51,472 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(157): Closed 445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:51,473 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=445ec8afc5baaba6cc3771e00cfb74bb, regionState=CLOSED 2024-11-21T04:25:51,473 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(157): Closed d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:51,474 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=d6599fc11fc065b339177b6c0b07cb1c, regionState=CLOSED 2024-11-21T04:25:51,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb, server=fdc0a393186c,34363,1732162938185 because future has completed 2024-11-21T04:25:51,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure d6599fc11fc065b339177b6c0b07cb1c, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:25:51,478 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-11-21T04:25:51,478 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; CloseRegionProcedure 445ec8afc5baaba6cc3771e00cfb74bb, server=fdc0a393186c,34363,1732162938185 in 164 msec 2024-11-21T04:25:51,478 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=188 2024-11-21T04:25:51,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=188, state=SUCCESS, hasLock=false; CloseRegionProcedure d6599fc11fc065b339177b6c0b07cb1c, server=fdc0a393186c,42393,1732162938268 in 165 msec 2024-11-21T04:25:51,479 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=445ec8afc5baaba6cc3771e00cfb74bb, UNASSIGN in 176 msec 2024-11-21T04:25:51,480 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=188, resume processing ppid=187 2024-11-21T04:25:51,480 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=d6599fc11fc065b339177b6c0b07cb1c, UNASSIGN in 177 msec 2024-11-21T04:25:51,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=186 2024-11-21T04:25:51,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=186, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 181 msec 2024-11-21T04:25:51,483 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163151483"}]},"ts":"1732163151483"} 2024-11-21T04:25:51,485 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-21T04:25:51,485 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-21T04:25:51,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 196 msec 2024-11-21T04:25:51,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-21T04:25:51,612 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-21T04:25:51,612 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-11-21T04:25:51,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T04:25:51,614 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T04:25:51,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-21T04:25:51,615 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=192, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T04:25:51,616 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-21T04:25:51,617 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:51,617 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb 2024-11-21T04:25:51,619 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/recovered.edits] 2024-11-21T04:25:51,619 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/recovered.edits] 2024-11-21T04:25:51,622 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/cf/34d83094789d420a8248c2adff2a99f7 2024-11-21T04:25:51,622 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/cf/983ec68adf534945b6b4fb41dc2edf47 to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/cf/983ec68adf534945b6b4fb41dc2edf47 2024-11-21T04:25:51,623 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c/recovered.edits/9.seqid 2024-11-21T04:25:51,623 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb/recovered.edits/9.seqid 2024-11-21T04:25:51,624 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/d6599fc11fc065b339177b6c0b07cb1c 2024-11-21T04:25:51,624 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportWithChecksum/445ec8afc5baaba6cc3771e00cfb74bb 
2024-11-21T04:25:51,624 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-21T04:25:51,626 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=192, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T04:25:51,628 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-21T04:25:51,630 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-21T04:25:51,631 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=192, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T04:25:51,631 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-11-21T04:25:51,631 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163151631"}]},"ts":"9223372036854775807"} 2024-11-21T04:25:51,631 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163151631"}]},"ts":"9223372036854775807"} 2024-11-21T04:25:51,633 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T04:25:51,633 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d6599fc11fc065b339177b6c0b07cb1c, NAME => 'testtb-testExportWithChecksum,,1732163100475.d6599fc11fc065b339177b6c0b07cb1c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 445ec8afc5baaba6cc3771e00cfb74bb, NAME => 'testtb-testExportWithChecksum,1,1732163100475.445ec8afc5baaba6cc3771e00cfb74bb.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T04:25:51,633 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
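Once the table is disabled, the DeleteTableProcedure above removes it: HFileArchiver moves the store files and recovered.edits markers under the archive directory rather than deleting them outright, and the region rows and table state are then removed from hbase:meta. On the client side this is again a couple of Admin calls; a hedged sketch follows, reusing the connection setup assumed in the earlier sketch, with the table and snapshot names taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // The table must already be disabled (see the disable sketch above).
          admin.deleteTable(TableName.valueOf("testtb-testExportWithChecksum"));
          // The snapshots are dropped separately, which the log records just below
          // as 'delete name: "..." type: DISABLED' handled by SnapshotManager.
          admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
          admin.deleteSnapshot("snaptb0-testExportWithChecksum");
        }
      }
    }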
2024-11-21T04:25:51,633 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732163151633"}]},"ts":"9223372036854775807"} 2024-11-21T04:25:51,634 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-21T04:25:51,635 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=192, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-21T04:25:51,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 23 msec 2024-11-21T04:25:51,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T04:25:51,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T04:25:51,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T04:25:51,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T04:25:51,680 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-21T04:25:51,680 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-21T04:25:51,680 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-21T04:25:51,680 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-21T04:25:51,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T04:25:51,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T04:25:51,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T04:25:51,687 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-21T04:25:51,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:51,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:51,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:51,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:51,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-21T04:25:51,689 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-21T04:25:51,689 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-21T04:25:51,692 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:51,692 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:51,692 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:51,692 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:51,694 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-21T04:25:51,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-21T04:25:51,697 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-21T04:25:51,699 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-21T04:25:51,722 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=810 (was 812), OpenFileDescriptor=809 (was 819), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=621 (was 453) - SystemLoadAverage LEAK? -, ProcessCount=21 (was 18) - ProcessCount LEAK? -, AvailableMemoryMB=1701 (was 2218) 2024-11-21T04:25:51,722 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-11-21T04:25:51,737 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=810, OpenFileDescriptor=809, MaxFileDescriptor=1048576, SystemLoadAverage=621, ProcessCount=21, AvailableMemoryMB=1698 2024-11-21T04:25:51,737 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-11-21T04:25:51,738 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T04:25:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:51,740 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T04:25:51,740 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:25:51,740 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 193 2024-11-21T04:25:51,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-21T04:25:51,741 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T04:25:51,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742297_1473 (size=418) 2024-11-21T04:25:51,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742297_1473 (size=418) 2024-11-21T04:25:51,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to 
blk_1073742297_1473 (size=418) 2024-11-21T04:25:51,751 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 52ed441b421ce2cc5a5468fd1a59d9ef, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:25:51,752 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ba678bc49194292cd74c6a8f0b61d330, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:25:51,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742299_1475 (size=79) 2024-11-21T04:25:51,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742299_1475 (size=79) 2024-11-21T04:25:51,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742299_1475 (size=79) 2024-11-21T04:25:51,768 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:25:51,768 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 52ed441b421ce2cc5a5468fd1a59d9ef, disabling compactions & flushes 2024-11-21T04:25:51,768 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:25:51,768 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 
2024-11-21T04:25:51,768 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. after waiting 0 ms 2024-11-21T04:25:51,768 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:25:51,769 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:25:51,769 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 52ed441b421ce2cc5a5468fd1a59d9ef: Waiting for close lock at 1732163151768Disabling compacts and flushes for region at 1732163151768Disabling writes for close at 1732163151768Writing region close event to WAL at 1732163151768Closed at 1732163151768 2024-11-21T04:25:51,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742298_1474 (size=79) 2024-11-21T04:25:51,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742298_1474 (size=79) 2024-11-21T04:25:51,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742298_1474 (size=79) 2024-11-21T04:25:51,772 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:25:51,773 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing ba678bc49194292cd74c6a8f0b61d330, disabling compactions & flushes 2024-11-21T04:25:51,773 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 2024-11-21T04:25:51,773 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 2024-11-21T04:25:51,773 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. after waiting 0 ms 2024-11-21T04:25:51,773 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 2024-11-21T04:25:51,773 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 
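The create request logged at 04:25:51,738 spells out the full schema of testtb-testExportFileSystemStateWithSkipTmp, and the two RegionOpenAndInit workers above lay out its regions ('' to '1' and '1' to '') on the filesystem. A hedged sketch of the client call that produces such a request follows; the builder values simply restate the attributes printed in the log (one version, 64 KB blocks, defaults otherwise), the pre-split at '1' yields the two regions, and the class name is illustrative rather than the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    // Matches the attributes in the logged request: VERSIONS => '1', BLOCKSIZE => 64 KB,
    // no compression or encoding (the defaults).
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(1)
        .setBlocksize(64 * 1024)
        .build();
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        .setColumnFamily(cf)
        .build();
    byte[][] splitKeys = { Bytes.toBytes("1") };   // yields the two regions ('' -> '1') and ('1' -> '')
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc, splitKeys);          // blocks until the CreateTableProcedure completes
    }
  }
}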
2024-11-21T04:25:51,773 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for ba678bc49194292cd74c6a8f0b61d330: Waiting for close lock at 1732163151773Disabling compacts and flushes for region at 1732163151773Disabling writes for close at 1732163151773Writing region close event to WAL at 1732163151773Closed at 1732163151773 2024-11-21T04:25:51,774 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T04:25:51,774 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732163151774"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163151774"}]},"ts":"1732163151774"} 2024-11-21T04:25:51,774 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732163151774"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732163151774"}]},"ts":"1732163151774"} 2024-11-21T04:25:51,777 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-21T04:25:51,778 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T04:25:51,779 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163151778"}]},"ts":"1732163151778"} 2024-11-21T04:25:51,780 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-21T04:25:51,780 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {fdc0a393186c=0} racks are {/default-rack=0} 2024-11-21T04:25:51,781 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-21T04:25:51,782 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-21T04:25:51,782 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-21T04:25:51,782 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T04:25:51,782 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-21T04:25:51,782 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-21T04:25:51,782 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T04:25:51,782 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-21T04:25:51,782 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-21T04:25:51,782 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-21T04:25:51,782 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=194, 
ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=52ed441b421ce2cc5a5468fd1a59d9ef, ASSIGN}, {pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ba678bc49194292cd74c6a8f0b61d330, ASSIGN}] 2024-11-21T04:25:51,783 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=52ed441b421ce2cc5a5468fd1a59d9ef, ASSIGN 2024-11-21T04:25:51,783 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ba678bc49194292cd74c6a8f0b61d330, ASSIGN 2024-11-21T04:25:51,784 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=52ed441b421ce2cc5a5468fd1a59d9ef, ASSIGN; state=OFFLINE, location=fdc0a393186c,39347,1732162938035; forceNewPlan=false, retain=false 2024-11-21T04:25:51,784 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ba678bc49194292cd74c6a8f0b61d330, ASSIGN; state=OFFLINE, location=fdc0a393186c,42393,1732162938268; forceNewPlan=false, retain=false 2024-11-21T04:25:51,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-21T04:25:51,935 INFO [fdc0a393186c:39577 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
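At this point the balancer has handed the two new regions to two of the cluster's three regionservers (all on one host and rack, as the BalancerClusterState lines show). A small sketch of how a client can observe the resulting assignment through the public RegionLocator API follows; it is illustrative only, the class name is invented, and the output format is arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class AssignmentCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Lists every region of the table with its key range and current location, as recorded in hbase:meta.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.printf("%s [%s,%s) -> %s%n",
            loc.getRegion().getEncodedName(),
            Bytes.toStringBinary(loc.getRegion().getStartKey()),
            Bytes.toStringBinary(loc.getRegion().getEndKey()),
            loc.getServerName());
      }
    }
  }
}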
2024-11-21T04:25:51,935 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=ba678bc49194292cd74c6a8f0b61d330, regionState=OPENING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:25:51,935 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=52ed441b421ce2cc5a5468fd1a59d9ef, regionState=OPENING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:25:51,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=52ed441b421ce2cc5a5468fd1a59d9ef, ASSIGN because future has completed 2024-11-21T04:25:51,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef, server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:25:51,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ba678bc49194292cd74c6a8f0b61d330, ASSIGN because future has completed 2024-11-21T04:25:51,939 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=197, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure ba678bc49194292cd74c6a8f0b61d330, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:25:52,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-21T04:25:52,093 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:25:52,093 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7752): Opening region: {ENCODED => 52ed441b421ce2cc5a5468fd1a59d9ef, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef.', STARTKEY => '', ENDKEY => '1'} 2024-11-21T04:25:52,093 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. service=AccessControlService 2024-11-21T04:25:52,094 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-21T04:25:52,094 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,094 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:25:52,094 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7794): checking encryption for 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,094 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7797): checking classloading for 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,099 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 2024-11-21T04:25:52,099 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7752): Opening region: {ENCODED => ba678bc49194292cd74c6a8f0b61d330, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330.', STARTKEY => '1', ENDKEY => ''} 2024-11-21T04:25:52,100 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. service=AccessControlService 2024-11-21T04:25:52,100 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
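Both region opens register the AccessControlService coprocessor before anything else, which comes from the secure cluster configuration rather than from the table descriptor. The sketch below shows the standard settings that produce this behaviour, written as Java Configuration calls for brevity (they would normally sit in hbase-site.xml). This is an assumption about how the secure mini-cluster is configured, reconstructed from the coprocessor log lines, not a copy of the test's setup code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureClusterConfSketch {
  public static Configuration secureConf() {
    Configuration conf = HBaseConfiguration.create();
    // Turn on HBase authorization and install the AccessController on the master,
    // the regionservers and every region; this is what makes each region open log
    // "Registered coprocessor service ... service=AccessControlService".
    conf.setBoolean("hbase.security.authorization", true);
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    return conf;
  }
}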
2024-11-21T04:25:52,100 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,100 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T04:25:52,100 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7794): checking encryption for ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,100 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7797): checking classloading for ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,119 INFO [StoreOpener-52ed441b421ce2cc5a5468fd1a59d9ef-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,121 INFO [StoreOpener-52ed441b421ce2cc5a5468fd1a59d9ef-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 52ed441b421ce2cc5a5468fd1a59d9ef columnFamilyName cf 2024-11-21T04:25:52,121 DEBUG [StoreOpener-52ed441b421ce2cc5a5468fd1a59d9ef-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:25:52,121 INFO [StoreOpener-52ed441b421ce2cc5a5468fd1a59d9ef-1 {}] regionserver.HStore(327): Store=52ed441b421ce2cc5a5468fd1a59d9ef/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:25:52,121 INFO [StoreOpener-ba678bc49194292cd74c6a8f0b61d330-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,122 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1038): replaying wal for 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,122 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,123 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,123 INFO [StoreOpener-ba678bc49194292cd74c6a8f0b61d330-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ba678bc49194292cd74c6a8f0b61d330 columnFamilyName cf 2024-11-21T04:25:52,123 DEBUG [StoreOpener-ba678bc49194292cd74c6a8f0b61d330-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T04:25:52,123 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1048): stopping wal replay for 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,123 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1060): Cleaning up temporary data for 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,125 INFO [StoreOpener-ba678bc49194292cd74c6a8f0b61d330-1 {}] regionserver.HStore(327): Store=ba678bc49194292cd74c6a8f0b61d330/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T04:25:52,125 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1038): replaying wal for ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,126 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,126 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,126 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1093): writing seq id for 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,127 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1048): stopping wal replay for 
ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,127 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1060): Cleaning up temporary data for ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,128 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1093): writing seq id for ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,139 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:25:52,140 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1114): Opened ba678bc49194292cd74c6a8f0b61d330; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70018432, jitterRate=0.04335594177246094}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:25:52,140 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,140 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1006): Region open journal for ba678bc49194292cd74c6a8f0b61d330: Running coprocessor pre-open hook at 1732163152100Writing region info on filesystem at 1732163152100Initializing all the Stores at 1732163152101 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163152101Cleaning up temporary data from old regions at 1732163152127 (+26 ms)Running coprocessor post-open hooks at 1732163152140 (+13 ms)Region opened successfully at 1732163152140 2024-11-21T04:25:52,141 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330., pid=197, masterSystemTime=1732163152091 2024-11-21T04:25:52,144 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 2024-11-21T04:25:52,144 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 
2024-11-21T04:25:52,145 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=ba678bc49194292cd74c6a8f0b61d330, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:25:52,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure ba678bc49194292cd74c6a8f0b61d330, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:25:52,150 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T04:25:52,151 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1114): Opened 52ed441b421ce2cc5a5468fd1a59d9ef; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64916013, jitterRate=-0.03267602622509003}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-21T04:25:52,151 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,151 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1006): Region open journal for 52ed441b421ce2cc5a5468fd1a59d9ef: Running coprocessor pre-open hook at 1732163152094Writing region info on filesystem at 1732163152094Initializing all the Stores at 1732163152095 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732163152095Cleaning up temporary data from old regions at 1732163152123 (+28 ms)Running coprocessor post-open hooks at 1732163152151 (+28 ms)Region opened successfully at 1732163152151 2024-11-21T04:25:52,152 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef., pid=196, masterSystemTime=1732163152089 2024-11-21T04:25:52,156 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-11-21T04:25:52,156 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; OpenRegionProcedure ba678bc49194292cd74c6a8f0b61d330, server=fdc0a393186c,42393,1732162938268 in 211 msec 2024-11-21T04:25:52,158 DEBUG [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 
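With both regions reporting "Region opened successfully" and their post-open deploy tasks finished, the table is usable from a client. A short usage sketch follows to make the effect of the opens concrete, assuming one row per region (the split key is '1'); the row keys and values are made up for illustration and are not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class SmokeTestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    byte[] cf = Bytes.toBytes("cf");
    byte[] q = Bytes.toBytes("q");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn)) {
      // "0-row" sorts before the split key '1' and "1-row" at or after it,
      // so the two puts land in the two different regions opened above.
      table.put(new Put(Bytes.toBytes("0-row")).addColumn(cf, q, Bytes.toBytes("v0")));
      table.put(new Put(Bytes.toBytes("1-row")).addColumn(cf, q, Bytes.toBytes("v1")));
      Result r = table.get(new Get(Bytes.toBytes("1-row")));
      System.out.println(Bytes.toString(r.getValue(cf, q)));  // prints "v1"
    }
  }
}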
2024-11-21T04:25:52,158 INFO [RS_OPEN_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:25:52,158 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ba678bc49194292cd74c6a8f0b61d330, ASSIGN in 374 msec 2024-11-21T04:25:52,158 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=52ed441b421ce2cc5a5468fd1a59d9ef, regionState=OPEN, openSeqNum=2, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:25:52,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:25:52,164 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=194 2024-11-21T04:25:52,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=194, state=SUCCESS, hasLock=false; OpenRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef, server=fdc0a393186c,39347,1732162938035 in 225 msec 2024-11-21T04:25:52,166 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=194, resume processing ppid=193 2024-11-21T04:25:52,166 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=52ed441b421ce2cc5a5468fd1a59d9ef, ASSIGN in 382 msec 2024-11-21T04:25:52,167 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T04:25:52,167 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163152167"}]},"ts":"1732163152167"} 2024-11-21T04:25:52,170 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-21T04:25:52,171 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T04:25:52,172 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-21T04:25:52,176 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-21T04:25:52,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:52,212 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:52,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:52,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:25:52,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:52,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:52,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:52,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:52,222 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:52,222 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:52,222 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:52,222 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-21T04:25:52,223 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 483 msec 2024-11-21T04:25:52,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-21T04:25:52,373 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-21T04:25:52,373 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-11-21T04:25:52,373 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:25:52,378 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-11-21T04:25:52,378 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:25:52,378 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-11-21T04:25:52,378 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-21T04:25:52,380 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-21T04:25:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163152381 (current time:1732163152381). 2024-11-21T04:25:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:25:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-21T04:25:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:25:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@654d8d67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:25:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:25:52,383 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:25:52,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:25:52,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: 
cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:25:52,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@776134c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:25:52,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:25:52,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,385 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48274, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:25:52,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@214b513, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:25:52,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:25:52,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:52,389 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43524, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:25:52,392 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 
2024-11-21T04:25:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:25:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,393 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:25:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@625e81ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:25:52,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:25:52,394 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:25:52,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:25:52,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:25:52,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f899a52, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:25:52,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:25:52,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,396 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48284, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:25:52,397 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65069286, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:25:52,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:25:52,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:52,400 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43526, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:25:52,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:52,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:52,407 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49574, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:25:52,408 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 
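The "Writing permission with rowKey ... jenkins: RWXCA" and "Read acl" entries, together with the hbase:acl region lookup just above, are the servers persisting and re-reading the table's permissions. From a client, equivalent ACL entries can be written and listed through AccessControlClient, as sketched below. This is the generic public API with an invented class name, not the path this test takes (here the owner's RWXCA entry is written by the create-table post-operation itself).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class AclSketch {
  public static void main(String[] args) throws Throwable {   // AccessControlClient methods declare Throwable
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // RWXCA as seen in the log: READ, WRITE, EXEC, CREATE, ADMIN for user "jenkins",
      // on the whole table (null family and qualifier).
      AccessControlClient.grant(conn, table, "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
      // Reads back the hbase:acl entries for this table, the data PermissionStorage logs above.
      for (UserPermission up : AccessControlClient.getUserPermissions(conn, table.getNameAsString())) {
        System.out.println(up);
      }
    }
  }
}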
2024-11-21T04:25:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:25:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,409 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:25:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-21T04:25:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
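The ACL check succeeds (jenkins holds RWXCA) and the master goes on to store SnapshotProcedure pid=198 below. On the client side, a snapshot request like this is normally issued through the Admin API; a minimal sketch, assuming an hbase-site.xml for this cluster is on the classpath, with the snapshot and table names taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure completes, which is what
          // the repeated "Checking to see if procedure is done" entries are polling for.
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
              SnapshotType.FLUSH);
        }
      }
    }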
2024-11-21T04:25:52,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-21T04:25:52,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-21T04:25:52,414 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:25:52,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-21T04:25:52,415 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:25:52,418 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:25:52,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742300_1476 (size=203) 2024-11-21T04:25:52,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742300_1476 (size=203) 2024-11-21T04:25:52,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742300_1476 (size=203) 2024-11-21T04:25:52,439 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:25:52,439 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef}, {pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba678bc49194292cd74c6a8f0b61d330}] 2024-11-21T04:25:52,441 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,441 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-21T04:25:52,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=199 2024-11-21T04:25:52,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:25:52,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=200 2024-11-21T04:25:52,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 2024-11-21T04:25:52,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.HRegion(2603): Flush status journal for 52ed441b421ce2cc5a5468fd1a59d9ef: 2024-11-21T04:25:52,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-21T04:25:52,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.HRegion(2603): Flush status journal for ba678bc49194292cd74c6a8f0b61d330: 2024-11-21T04:25:52,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-21T04:25:52,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:52,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:25:52,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:52,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:25:52,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:25:52,593 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-21T04:25:52,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742301_1477 (size=82) 2024-11-21T04:25:52,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742301_1477 (size=82) 2024-11-21T04:25:52,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742301_1477 (size=82) 2024-11-21T04:25:52,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 2024-11-21T04:25:52,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=200 2024-11-21T04:25:52,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=200 2024-11-21T04:25:52,615 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,615 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ba678bc49194292cd74c6a8f0b61d330 in 177 msec 2024-11-21T04:25:52,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742302_1478 (size=82) 2024-11-21T04:25:52,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742302_1478 (size=82) 2024-11-21T04:25:52,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742302_1478 (size=82) 2024-11-21T04:25:52,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 
2024-11-21T04:25:52,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=199 2024-11-21T04:25:52,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=199 2024-11-21T04:25:52,629 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,629 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=198 2024-11-21T04:25:52,634 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:25:52,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef in 192 msec 2024-11-21T04:25:52,635 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:25:52,639 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:25:52,639 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:52,640 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:52,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742303_1479 (size=585) 2024-11-21T04:25:52,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742303_1479 (size=585) 2024-11-21T04:25:52,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742303_1479 (size=585) 2024-11-21T04:25:52,660 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:25:52,665 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:25:52,665 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:52,667 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:25:52,667 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-21T04:25:52,668 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 256 msec 2024-11-21T04:25:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-21T04:25:52,732 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-21T04:25:52,742 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='0d8d56aa6485d24ce435314ee6e217d7c', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef., hostname=fdc0a393186c,39347,1732162938035, seqNum=2] 2024-11-21T04:25:52,748 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='2d4268e73861169b1757818b7de4add8c', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:52,749 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='36ccc6a86618ce51599e6975e30881968', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 
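With pid=198 finished in 256 msec and the client reporting "Operation: SNAPSHOT ... completed", the empty snapshot now lives under .hbase-snapshot. A short sketch of confirming that from a client with the same Admin API; the regex here is only an illustration:

    import java.util.List;
    import java.util.regex.Pattern;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListTestSnapshots {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The pattern matches both the emptySnaptb0-* and snaptb0-* snapshots of this test.
          List<SnapshotDescription> snapshots =
              admin.listSnapshots(Pattern.compile(".*testExportFileSystemStateWithSkipTmp"));
          for (SnapshotDescription sd : snapshots) {
            System.out.println(sd.getName() + " table=" + sd.getTableName() + " type=" + sd.getType());
          }
        }
      }
    }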
2024-11-21T04:25:52,749 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='4688809e1a20d7323f7f1998f4ae5f4f5', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:52,750 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='158699eff2eee814fa3f193fc8db57baa', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:52,751 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='619a1f0a22c87f11a74a53772331ec120', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:52,752 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='ceecac9e6e7022f36c3717053d4d437a', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:52,752 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='b2b162c4fa1ff0f98ee5c31220736131', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:52,753 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='57b6f289eb3da6b508ca9339e2b18888e', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:52,760 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39347 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. with WAL disabled. Data may be lost in the event of a crash. 2024-11-21T04:25:52,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42393 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. with WAL disabled. Data may be lost in the event of a crash. 
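The two "writing data to region ... with WAL disabled" warnings come from the test loading rows without write-ahead logging. Durability is a per-mutation client setting; a minimal sketch of the kind of write that produces this warning, using the cf/q column seen in the log (the row key and value here are made up for illustration):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(
                 TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
          Put put = new Put(Bytes.toBytes("row-0001")); // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL bypasses the write-ahead log entirely, which is exactly why the
          // region server warns that data may be lost in the event of a crash.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }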
2024-11-21T04:25:52,767 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-21T04:25:52,770 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:52,770 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:25:52,771 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-21T04:25:52,773 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-21T04:25:52,780 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-21T04:25:52,788 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-21T04:25:52,793 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-21T04:25:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732163152793 (current time:1732163152793). 
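The ClientMetaTableAccessor scans and the earlier AsyncNonMetaRegionLocator lookups are the client resolving which region server hosts each of the table's two regions (split at row key "1"). The same information is available explicitly through a RegionLocator; a minimal sketch, assuming the table from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowRegionLocations {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
          // Expect two entries here, matching "Found 2 regions for table ..." above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }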
2024-11-21T04:25:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-21T04:25:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-21T04:25:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-21T04:25:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b1f171a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:25:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:25:52,796 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:25:52,796 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:25:52,796 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:25:52,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@557200a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:25:52,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:25:52,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,798 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48312, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:25:52,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cff82db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:25:52,799 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:25:52,800 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:52,801 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43538, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:25:52,802 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:25:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:25:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,802 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T04:25:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16e29178, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ClusterIdFetcher(90): Going to request fdc0a393186c,39577,-1 for getting cluster id 2024-11-21T04:25:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T04:25:52,804 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '572b726e-d928-426e-9114-1cd37decf96c' 2024-11-21T04:25:52,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T04:25:52,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "572b726e-d928-426e-9114-1cd37decf96c" 2024-11-21T04:25:52,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b42f7bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fdc0a393186c,39577,-1] 2024-11-21T04:25:52,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T04:25:52,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,805 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48334, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T04:25:52,806 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a0a798f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T04:25:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T04:25:52,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fdc0a393186c,34363,1732162938185, seqNum=-1] 2024-11-21T04:25:52,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:52,809 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43554, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-21T04:25:52,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., hostname=fdc0a393186c,42393,1732162938268, seqNum=2] 2024-11-21T04:25:52,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T04:25:52,813 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49580, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T04:25:52,814 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577. 2024-11-21T04:25:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor164.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-21T04:25:52,814 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:25:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-21T04:25:52,815 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T04:25:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-21T04:25:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-21T04:25:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-21T04:25:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-21T04:25:52,818 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-21T04:25:52,819 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-21T04:25:52,822 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-21T04:25:52,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742304_1480 (size=198) 2024-11-21T04:25:52,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742304_1480 (size=198) 2024-11-21T04:25:52,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742304_1480 (size=198) 2024-11-21T04:25:52,839 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-21T04:25:52,839 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef}, {pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba678bc49194292cd74c6a8f0b61d330}] 2024-11-21T04:25:52,840 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:52,840 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:52,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-21T04:25:52,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39347 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-21T04:25:52,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42393 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=203 2024-11-21T04:25:52,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:25:52,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 
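The "Checking to see if procedure is done pid=201" entries are the master answering the blocking client's polls while the PEWorkers dispatch SnapshotRegionProcedures 202 and 203 to the region servers. For callers that prefer not to block, the Admin API also exposes an asynchronous variant; a minimal sketch, assuming the same snapshot and table names:

    import java.util.concurrent.Future;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class AsyncFlushSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          Future<Void> done = admin.snapshotAsync(new SnapshotDescription(
              "snaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
              SnapshotType.FLUSH));
          // Blocking on the future here corresponds to the client-side polling that
          // shows up as "Checking to see if procedure is done" on the master.
          done.get();
        }
      }
    }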
2024-11-21T04:25:52,992 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2902): Flushing 52ed441b421ce2cc5a5468fd1a59d9ef 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-21T04:25:52,992 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2902): Flushing ba678bc49194292cd74c6a8f0b61d330 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-21T04:25:53,010 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/.tmp/cf/a06cbc5a046c4494bb0ebb5ed3522840 is 71, key is 0869799841970677b69a061a7785f785/cf:q/1732163152760/Put/seqid=0 2024-11-21T04:25:53,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/.tmp/cf/ceb1dda8a0d241ef9bcfdde0aa99c63c is 71, key is 23f9f854fffe6cf9ea8da220d840b036/cf:q/1732163152763/Put/seqid=0 2024-11-21T04:25:53,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742305_1481 (size=5216) 2024-11-21T04:25:53,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742305_1481 (size=5216) 2024-11-21T04:25:53,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742305_1481 (size=5216) 2024-11-21T04:25:53,024 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/.tmp/cf/a06cbc5a046c4494bb0ebb5ed3522840 2024-11-21T04:25:53,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/.tmp/cf/a06cbc5a046c4494bb0ebb5ed3522840 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/cf/a06cbc5a046c4494bb0ebb5ed3522840 2024-11-21T04:25:53,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/cf/a06cbc5a046c4494bb0ebb5ed3522840, entries=2, sequenceid=6, filesize=5.1 K 2024-11-21T04:25:53,034 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 52ed441b421ce2cc5a5468fd1a59d9ef in 42ms, sequenceid=6, compaction requested=false 2024-11-21T04:25:53,034 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-21T04:25:53,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 52ed441b421ce2cc5a5468fd1a59d9ef: 2024-11-21T04:25:53,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-21T04:25:53,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:53,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:25:53,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/cf/a06cbc5a046c4494bb0ebb5ed3522840] hfiles 2024-11-21T04:25:53,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/cf/a06cbc5a046c4494bb0ebb5ed3522840 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:53,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742306_1482 (size=8394) 2024-11-21T04:25:53,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742306_1482 (size=8394) 2024-11-21T04:25:53,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742306_1482 (size=8394) 2024-11-21T04:25:53,038 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/.tmp/cf/ceb1dda8a0d241ef9bcfdde0aa99c63c 2024-11-21T04:25:53,043 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/.tmp/cf/ceb1dda8a0d241ef9bcfdde0aa99c63c as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/cf/ceb1dda8a0d241ef9bcfdde0aa99c63c 2024-11-21T04:25:53,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742307_1483 (size=121) 2024-11-21T04:25:53,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742307_1483 (size=121) 2024-11-21T04:25:53,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:25:53,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-21T04:25:53,048 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/cf/ceb1dda8a0d241ef9bcfdde0aa99c63c, entries=48, sequenceid=6, filesize=8.2 K 2024-11-21T04:25:53,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-21T04:25:53,049 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:53,049 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:25:53,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742307_1483 (size=121) 2024-11-21T04:25:53,050 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for ba678bc49194292cd74c6a8f0b61d330 in 57ms, sequenceid=6, compaction requested=false 2024-11-21T04:25:53,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2603): Flush status journal for ba678bc49194292cd74c6a8f0b61d330: 2024-11-21T04:25:53,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 
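Unlike the empty snapshot earlier, this pass has data to persist: each SnapshotRegionCallable flushes the region's memstore to an HFile (a06cbc5a046c4494bb0ebb5ed3522840 and ceb1dda8a0d241ef9bcfdde0aa99c63c above), and the manifest entries below then reference those files. The same flush can be forced directly through the Admin API; a minimal sketch against the same table:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Writes memstore contents out to HFiles, the same work the snapshot's
          // per-region flush performs before references are added to the manifest.
          admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
        }
      }
    }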
2024-11-21T04:25:53,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:53,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-21T04:25:53,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/cf/ceb1dda8a0d241ef9bcfdde0aa99c63c] hfiles 2024-11-21T04:25:53,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/cf/ceb1dda8a0d241ef9bcfdde0aa99c63c for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:53,055 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef in 211 msec 2024-11-21T04:25:53,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742308_1484 (size=121) 2024-11-21T04:25:53,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742308_1484 (size=121) 2024-11-21T04:25:53,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742308_1484 (size=121) 2024-11-21T04:25:53,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 
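Once the remaining procedure states below complete and the snapshot is moved out of .hbase-snapshot/.tmp, the ExportSnapshot run at the end of this section copies it to the export destination with skipTmp=true. A sketch of driving that export programmatically; the "snapshot.export.skiptmp" property name is an assumption that should be checked against the ExportSnapshot version in use, and the destination URI is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSkipTmp {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key behind the "skipTmp=true" flag logged by ExportSnapshot; verify
        // the exact property name for the HBase version in use.
        conf.setBoolean("snapshot.export.skiptmp", true);
        int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://namenode:8020/export-test/export-dest" // placeholder target
        });
        System.exit(exitCode);
      }
    }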
2024-11-21T04:25:53,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/fdc0a393186c:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=203 2024-11-21T04:25:53,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster(4169): Remote procedure done, pid=203 2024-11-21T04:25:53,069 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:53,069 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:25:53,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=203, resume processing ppid=201 2024-11-21T04:25:53,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ba678bc49194292cd74c6a8f0b61d330 in 231 msec 2024-11-21T04:25:53,079 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-21T04:25:53,080 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-21T04:25:53,082 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-21T04:25:53,082 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:53,083 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:53,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742309_1485 (size=663) 2024-11-21T04:25:53,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742309_1485 (size=663) 2024-11-21T04:25:53,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742309_1485 (size=663) 2024-11-21T04:25:53,112 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-21T04:25:53,123 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-21T04:25:53,124 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:53,127 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-21T04:25:53,127 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-21T04:25:53,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 312 msec 2024-11-21T04:25:53,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-21T04:25:53,132 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-21T04:25:53,132 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163153132 2024-11-21T04:25:53,133 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44067, tgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163153132, rawTgtDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163153132, srcFsUri=hdfs://localhost:44067, srcDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:25:53,200 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44067, inputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a 2024-11-21T04:25:53,200 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163153132, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163153132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:53,202 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-21T04:25:53,219 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163153132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:53,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742310_1486 (size=198) 2024-11-21T04:25:53,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742310_1486 (size=198) 2024-11-21T04:25:53,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742310_1486 (size=198) 2024-11-21T04:25:53,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742311_1487 (size=663) 2024-11-21T04:25:53,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742311_1487 (size=663) 2024-11-21T04:25:53,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742311_1487 (size=663) 2024-11-21T04:25:53,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:53,290 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:53,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:54,371 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-13714524652080659094.jar 2024-11-21T04:25:54,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:54,372 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:54,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop-8976726221070202669.jar 2024-11-21T04:25:54,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:54,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:54,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:54,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:54,443 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:54,443 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-21T04:25:54,443 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-21T04:25:54,443 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-21T04:25:54,443 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-21T04:25:54,443 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-21T04:25:54,444 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-21T04:25:54,444 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-21T04:25:54,444 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-21T04:25:54,444 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-21T04:25:54,444 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-21T04:25:54,444 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-21T04:25:54,445 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-21T04:25:54,445 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:25:54,445 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:25:54,445 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:25:54,445 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:25:54,445 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-21T04:25:54,446 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:25:54,446 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-21T04:25:54,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742312_1488 (size=131440) 2024-11-21T04:25:54,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742312_1488 (size=131440) 2024-11-21T04:25:54,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742312_1488 (size=131440) 2024-11-21T04:25:54,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742313_1489 (size=4188619) 2024-11-21T04:25:54,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742313_1489 (size=4188619) 2024-11-21T04:25:54,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742313_1489 (size=4188619) 2024-11-21T04:25:54,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742314_1490 (size=1323991) 2024-11-21T04:25:54,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742314_1490 (size=1323991) 2024-11-21T04:25:54,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742314_1490 (size=1323991) 2024-11-21T04:25:54,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742315_1491 (size=903734) 2024-11-21T04:25:54,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742315_1491 (size=903734) 2024-11-21T04:25:54,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742315_1491 (size=903734) 2024-11-21T04:25:54,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742316_1492 (size=8360083) 2024-11-21T04:25:54,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742316_1492 (size=8360083) 2024-11-21T04:25:54,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742316_1492 (size=8360083) 2024-11-21T04:25:54,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742317_1493 (size=440656) 2024-11-21T04:25:54,980 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742317_1493 (size=440656) 2024-11-21T04:25:54,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742317_1493 (size=440656) 2024-11-21T04:25:54,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742318_1494 (size=1877034) 2024-11-21T04:25:54,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742318_1494 (size=1877034) 2024-11-21T04:25:54,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742318_1494 (size=1877034) 2024-11-21T04:25:54,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742319_1495 (size=77835) 2024-11-21T04:25:54,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742319_1495 (size=77835) 2024-11-21T04:25:54,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742319_1495 (size=77835) 2024-11-21T04:25:55,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742320_1496 (size=30949) 2024-11-21T04:25:55,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742320_1496 (size=30949) 2024-11-21T04:25:55,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742320_1496 (size=30949) 2024-11-21T04:25:55,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742321_1497 (size=1597327) 2024-11-21T04:25:55,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742321_1497 (size=1597327) 2024-11-21T04:25:55,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742321_1497 (size=1597327) 2024-11-21T04:25:55,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742322_1498 (size=4695811) 2024-11-21T04:25:55,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742322_1498 (size=4695811) 2024-11-21T04:25:55,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742322_1498 (size=4695811) 2024-11-21T04:25:55,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742323_1499 (size=6424740) 2024-11-21T04:25:55,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742323_1499 (size=6424740) 2024-11-21T04:25:55,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742323_1499 (size=6424740) 2024-11-21T04:25:55,053 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742324_1500 (size=232957) 2024-11-21T04:25:55,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742324_1500 (size=232957) 2024-11-21T04:25:55,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742324_1500 (size=232957) 2024-11-21T04:25:55,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742325_1501 (size=127628) 2024-11-21T04:25:55,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742325_1501 (size=127628) 2024-11-21T04:25:55,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742325_1501 (size=127628) 2024-11-21T04:25:55,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742326_1502 (size=20406) 2024-11-21T04:25:55,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742326_1502 (size=20406) 2024-11-21T04:25:55,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742326_1502 (size=20406) 2024-11-21T04:25:55,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742327_1503 (size=5175431) 2024-11-21T04:25:55,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742327_1503 (size=5175431) 2024-11-21T04:25:55,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742327_1503 (size=5175431) 2024-11-21T04:25:55,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742328_1504 (size=217634) 2024-11-21T04:25:55,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742328_1504 (size=217634) 2024-11-21T04:25:55,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742328_1504 (size=217634) 2024-11-21T04:25:55,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742329_1505 (size=1832290) 2024-11-21T04:25:55,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742329_1505 (size=1832290) 2024-11-21T04:25:55,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742329_1505 (size=1832290) 2024-11-21T04:25:55,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742330_1506 (size=322274) 2024-11-21T04:25:55,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742330_1506 (size=322274) 2024-11-21T04:25:55,116 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742330_1506 (size=322274) 2024-11-21T04:25:55,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742331_1507 (size=503880) 2024-11-21T04:25:55,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742331_1507 (size=503880) 2024-11-21T04:25:55,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742331_1507 (size=503880) 2024-11-21T04:25:55,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742332_1508 (size=29229) 2024-11-21T04:25:55,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742332_1508 (size=29229) 2024-11-21T04:25:55,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742332_1508 (size=29229) 2024-11-21T04:25:55,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742333_1509 (size=24096) 2024-11-21T04:25:55,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742333_1509 (size=24096) 2024-11-21T04:25:55,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742333_1509 (size=24096) 2024-11-21T04:25:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742334_1510 (size=111872) 2024-11-21T04:25:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742334_1510 (size=111872) 2024-11-21T04:25:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742334_1510 (size=111872) 2024-11-21T04:25:55,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742335_1511 (size=45609) 2024-11-21T04:25:55,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742335_1511 (size=45609) 2024-11-21T04:25:55,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742335_1511 (size=45609) 2024-11-21T04:25:55,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742336_1512 (size=136454) 2024-11-21T04:25:55,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742336_1512 (size=136454) 2024-11-21T04:25:55,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742336_1512 (size=136454) 2024-11-21T04:25:55,164 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-21T04:25:55,166 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-21T04:25:55,167 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-21T04:25:55,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742337_1513 (size=366) 2024-11-21T04:25:55,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742337_1513 (size=366) 2024-11-21T04:25:55,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742337_1513 (size=366) 2024-11-21T04:25:55,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742338_1514 (size=15) 2024-11-21T04:25:55,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742338_1514 (size=15) 2024-11-21T04:25:55,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742338_1514 (size=15) 2024-11-21T04:25:55,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742339_1515 (size=303904) 2024-11-21T04:25:55,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742339_1515 (size=303904) 2024-11-21T04:25:55,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742339_1515 (size=303904) 2024-11-21T04:25:56,255 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-21T04:25:56,255 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-21T04:25:56,259 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0009_000001 (auth:SIMPLE) from 127.0.0.1:45786 2024-11-21T04:25:56,268 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0009/container_1732162945431_0009_01_000001/launch_container.sh] 2024-11-21T04:25:56,268 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0009/container_1732162945431_0009_01_000001/container_tokens] 2024-11-21T04:25:56,268 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-1_0/usercache/jenkins/appcache/application_1732162945431_0009/container_1732162945431_0009_01_000001/sysfs] 2024-11-21T04:25:56,522 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0010_000001 (auth:SIMPLE) from 127.0.0.1:56550 2024-11-21T04:25:57,007 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:25:57,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:25:57,560 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-21T04:25:57,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-21T04:26:01,439 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0010_000001 (auth:SIMPLE) from 127.0.0.1:34266 2024-11-21T04:26:01,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742340_1516 (size=349578) 2024-11-21T04:26:01,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742340_1516 (size=349578) 2024-11-21T04:26:01,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742340_1516 (size=349578) 2024-11-21T04:26:03,062 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:26:03,768 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0010_000001 (auth:SIMPLE) from 127.0.0.1:44114 2024-11-21T04:26:05,116 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fa291fa3994bf7902442d1d129cd311c, had cached 0 bytes from a total of 8258 2024-11-21T04:26:05,117 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fef81f9296f7039a4489a2709be189d3, had cached 0 bytes from a total of 5356 2024-11-21T04:26:07,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742341_1517 (size=8394) 2024-11-21T04:26:07,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742341_1517 (size=8394) 2024-11-21T04:26:07,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742341_1517 (size=8394) 2024-11-21T04:26:07,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742342_1518 (size=5216) 2024-11-21T04:26:07,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742342_1518 (size=5216) 2024-11-21T04:26:07,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742342_1518 (size=5216) 2024-11-21T04:26:07,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742343_1519 (size=17455) 2024-11-21T04:26:07,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742343_1519 (size=17455) 2024-11-21T04:26:07,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742343_1519 (size=17455) 2024-11-21T04:26:07,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742344_1520 (size=476) 2024-11-21T04:26:07,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742344_1520 (size=476) 2024-11-21T04:26:07,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742344_1520 (size=476) 2024-11-21T04:26:07,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742345_1521 (size=17455) 2024-11-21T04:26:07,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742345_1521 (size=17455) 2024-11-21T04:26:07,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742345_1521 (size=17455) 2024-11-21T04:26:07,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742346_1522 (size=349578) 2024-11-21T04:26:07,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742346_1522 (size=349578) 
2024-11-21T04:26:07,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742346_1522 (size=349578) 2024-11-21T04:26:07,828 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0010_000001 (auth:SIMPLE) from 127.0.0.1:44118 2024-11-21T04:26:07,839 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732162945431_0010_01_000002 is : 143 2024-11-21T04:26:07,853 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0010/container_1732162945431_0010_01_000002/launch_container.sh] 2024-11-21T04:26:07,853 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0010/container_1732162945431_0010_01_000002/container_tokens] 2024-11-21T04:26:07,853 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0010/container_1732162945431_0010_01_000002/sysfs] 2024-11-21T04:26:09,360 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-21T04:26:09,360 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
2024-11-21T04:26:09,367 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,367 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-21T04:26:09,368 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-21T04:26:09,368 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,368 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-21T04:26:09,368 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-21T04:26:09,368 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1029908593_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163153132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163153132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,369 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163153132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-21T04:26:09,369 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/export-test/export-1732163153132/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-21T04:26:09,377 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=204, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-21T04:26:09,380 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163169380"}]},"ts":"1732163169380"} 2024-11-21T04:26:09,382 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-21T04:26:09,382 INFO 
[PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-21T04:26:09,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=205, ppid=204, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-21T04:26:09,384 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=52ed441b421ce2cc5a5468fd1a59d9ef, UNASSIGN}, {pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ba678bc49194292cd74c6a8f0b61d330, UNASSIGN}] 2024-11-21T04:26:09,385 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ba678bc49194292cd74c6a8f0b61d330, UNASSIGN 2024-11-21T04:26:09,385 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=52ed441b421ce2cc5a5468fd1a59d9ef, UNASSIGN 2024-11-21T04:26:09,386 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=ba678bc49194292cd74c6a8f0b61d330, regionState=CLOSING, regionLocation=fdc0a393186c,42393,1732162938268 2024-11-21T04:26:09,387 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=52ed441b421ce2cc5a5468fd1a59d9ef, regionState=CLOSING, regionLocation=fdc0a393186c,39347,1732162938035 2024-11-21T04:26:09,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ba678bc49194292cd74c6a8f0b61d330, UNASSIGN because future has completed 2024-11-21T04:26:09,389 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:26:09,389 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure ba678bc49194292cd74c6a8f0b61d330, server=fdc0a393186c,42393,1732162938268}] 2024-11-21T04:26:09,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=52ed441b421ce2cc5a5468fd1a59d9ef, UNASSIGN because future has completed 2024-11-21T04:26:09,390 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-21T04:26:09,390 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=209, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef, 
server=fdc0a393186c,39347,1732162938035}] 2024-11-21T04:26:09,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-21T04:26:09,542 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(122): Close 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:26:09,542 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(122): Close ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:26:09,542 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:26:09,542 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-21T04:26:09,542 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1722): Closing ba678bc49194292cd74c6a8f0b61d330, disabling compactions & flushes 2024-11-21T04:26:09,542 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1722): Closing 52ed441b421ce2cc5a5468fd1a59d9ef, disabling compactions & flushes 2024-11-21T04:26:09,542 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 2024-11-21T04:26:09,542 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:26:09,542 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:26:09,542 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 2024-11-21T04:26:09,542 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. after waiting 0 ms 2024-11-21T04:26:09,542 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. after waiting 0 ms 2024-11-21T04:26:09,542 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 
2024-11-21T04:26:09,542 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 2024-11-21T04:26:09,565 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:26:09,566 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:09,566 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef. 2024-11-21T04:26:09,566 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1676): Region close journal for 52ed441b421ce2cc5a5468fd1a59d9ef: Waiting for close lock at 1732163169542Running coprocessor pre-close hooks at 1732163169542Disabling compacts and flushes for region at 1732163169542Disabling writes for close at 1732163169542Writing region close event to WAL at 1732163169543 (+1 ms)Running coprocessor post-close hooks at 1732163169566 (+23 ms)Closed at 1732163169566 2024-11-21T04:26:09,569 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(157): Closed 52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:26:09,569 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=52ed441b421ce2cc5a5468fd1a59d9ef, regionState=CLOSED 2024-11-21T04:26:09,569 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T04:26:09,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef, server=fdc0a393186c,39347,1732162938035 because future has completed 2024-11-21T04:26:09,572 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:09,572 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330. 
2024-11-21T04:26:09,572 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1676): Region close journal for ba678bc49194292cd74c6a8f0b61d330: Waiting for close lock at 1732163169542Running coprocessor pre-close hooks at 1732163169542Disabling compacts and flushes for region at 1732163169542Disabling writes for close at 1732163169542Writing region close event to WAL at 1732163169559 (+17 ms)Running coprocessor post-close hooks at 1732163169571 (+12 ms)Closed at 1732163169572 (+1 ms) 2024-11-21T04:26:09,573 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(157): Closed ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:26:09,574 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=ba678bc49194292cd74c6a8f0b61d330, regionState=CLOSED 2024-11-21T04:26:09,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=206 2024-11-21T04:26:09,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=206, state=SUCCESS, hasLock=false; CloseRegionProcedure 52ed441b421ce2cc5a5468fd1a59d9ef, server=fdc0a393186c,39347,1732162938035 in 182 msec 2024-11-21T04:26:09,576 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=52ed441b421ce2cc5a5468fd1a59d9ef, UNASSIGN in 190 msec 2024-11-21T04:26:09,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure ba678bc49194292cd74c6a8f0b61d330, server=fdc0a393186c,42393,1732162938268 because future has completed 2024-11-21T04:26:09,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=207 2024-11-21T04:26:09,579 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; CloseRegionProcedure ba678bc49194292cd74c6a8f0b61d330, server=fdc0a393186c,42393,1732162938268 in 188 msec 2024-11-21T04:26:09,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=205 2024-11-21T04:26:09,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ba678bc49194292cd74c6a8f0b61d330, UNASSIGN in 194 msec 2024-11-21T04:26:09,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=204 2024-11-21T04:26:09,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=204, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 198 msec 2024-11-21T04:26:09,584 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732163169583"}]},"ts":"1732163169583"} 2024-11-21T04:26:09,585 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-21T04:26:09,585 INFO [PEWorker-1 {}] 
procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-21T04:26:09,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 209 msec 2024-11-21T04:26:09,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-21T04:26:09,692 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-21T04:26:09,693 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] procedure2.ProcedureExecutor(1139): Stored pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,698 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,699 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=210, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,701 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42393 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,703 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:26:09,705 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/recovered.edits] 2024-11-21T04:26:09,706 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:26:09,707 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/cf/a06cbc5a046c4494bb0ebb5ed3522840 to 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/cf/a06cbc5a046c4494bb0ebb5ed3522840 2024-11-21T04:26:09,709 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef/recovered.edits/9.seqid 2024-11-21T04:26:09,710 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/52ed441b421ce2cc5a5468fd1a59d9ef 2024-11-21T04:26:09,710 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/cf, FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/recovered.edits] 2024-11-21T04:26:09,715 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/cf/ceb1dda8a0d241ef9bcfdde0aa99c63c to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/cf/ceb1dda8a0d241ef9bcfdde0aa99c63c 2024-11-21T04:26:09,717 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/recovered.edits/9.seqid to hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330/recovered.edits/9.seqid 2024-11-21T04:26:09,718 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ba678bc49194292cd74c6a8f0b61d330 2024-11-21T04:26:09,718 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-21T04:26:09,720 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=210, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,723 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-21T04:26:09,726 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 
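The entries above show the test tearing down its table: the master finishes the DISABLE procedure, then a DeleteTableProcedure archives the region directories (HFileArchiver), removes the rows from hbase:meta, and drops the table descriptor. A minimal client-side sketch of the same cleanup through the public Admin API is shown below; the table and snapshot names are taken from this log, while the class name and connection setup are illustrative assumptions, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropExportTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A table must be disabled before it can be deleted; the log shows
          // DisableTableProcedure completing before DeleteTableProcedure starts.
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);
            }
            admin.deleteTable(table);
          }
          // The test's snapshots are removed as well (see the "delete name: ..."
          // entries further below in this log).
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
          admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
        }
      }
    }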
2024-11-21T04:26:09,727 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=210, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,727 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-11-21T04:26:09,727 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163169727"}]},"ts":"9223372036854775807"} 2024-11-21T04:26:09,727 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732163169727"}]},"ts":"9223372036854775807"} 2024-11-21T04:26:09,729 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-21T04:26:09,729 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 52ed441b421ce2cc5a5468fd1a59d9ef, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732163151738.52ed441b421ce2cc5a5468fd1a59d9ef.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ba678bc49194292cd74c6a8f0b61d330, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732163151738.ba678bc49194292cd74c6a8f0b61d330.', STARTKEY => '1', ENDKEY => ''}] 2024-11-21T04:26:09,729 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-11-21T04:26:09,730 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732163169729"}]},"ts":"9223372036854775807"} 2024-11-21T04:26:09,732 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-21T04:26:09,732 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=210, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,733 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 39 msec 2024-11-21T04:26:09,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,952 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,954 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-21T04:26:09,954 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-21T04:26:09,954 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-21T04:26:09,954 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-21T04:26:09,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:26:09,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:26:09,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:26:09,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-21T04:26:09,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=210 2024-11-21T04:26:09,965 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with 
data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:26:09,965 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:26:09,966 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,966 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:26:09,966 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-21T04:26:09,967 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-21T04:26:09,971 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-21T04:26:09,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,973 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-21T04:26:09,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:09,993 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=815 (was 810) Potentially hanging thread: Thread-7016 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1124159837_1 at /127.0.0.1:33734 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 139572) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (625843813) connection to localhost/127.0.0.1:36727 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1124159837_1 at /127.0.0.1:60534 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:60554 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36727 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:33742 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1029908593_22 at /127.0.0.1:34174 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 809), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=694 (was 621) - SystemLoadAverage LEAK? -, ProcessCount=22 (was 21) - ProcessCount LEAK? -, AvailableMemoryMB=1434 (was 1698) 2024-11-21T04:26:09,993 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=815 is superior to 500 2024-11-21T04:26:09,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
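The ResourceChecker summary above compares thread count, open file descriptors, system load average and available memory before and after the test and flags possible leaks (here it warns that 815 threads exceeds its 500-thread threshold). The sketch below is a simplified, hypothetical version of that before/after accounting built only on standard JDK MXBeans; it is not HBase's ResourceChecker, and the units it reports differ from the summary printed above.

    import java.lang.management.ManagementFactory;
    import java.lang.management.OperatingSystemMXBean;
    import java.lang.management.ThreadMXBean;

    public class ResourceSnapshot {
      final int threadCount;
      final double systemLoadAverage;
      final long freeMemoryMb;

      ResourceSnapshot() {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        this.threadCount = threads.getThreadCount();
        this.systemLoadAverage = os.getSystemLoadAverage();
        this.freeMemoryMb = Runtime.getRuntime().freeMemory() / (1024 * 1024);
      }

      // Prints "after: X (was Y)" style lines, loosely mirroring the summary above.
      static void report(ResourceSnapshot before, ResourceSnapshot after) {
        System.out.printf("Thread=%d (was %d)%n", after.threadCount, before.threadCount);
        System.out.printf("SystemLoadAverage=%.2f (was %.2f)%n",
            after.systemLoadAverage, before.systemLoadAverage);
        System.out.printf("AvailableMemoryMB=%d (was %d)%n",
            after.freeMemoryMb, before.freeMemoryMb);
      }

      public static void main(String[] args) {
        ResourceSnapshot before = new ResourceSnapshot();
        // ... run the code under test here ...
        ResourceSnapshot after = new ResourceSnapshot();
        report(before, after);
      }
    }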
2024-11-21T04:26:10,000 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cb31a64{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-21T04:26:10,003 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9fc67aa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T04:26:10,003 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T04:26:10,003 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f039685{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-21T04:26:10,003 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b506169{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,STOPPED} 2024-11-21T04:26:13,904 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732162945431_0010_000001 (auth:SIMPLE) from 127.0.0.1:60036 2024-11-21T04:26:13,915 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0010/container_1732162945431_0010_01_000001/launch_container.sh] 2024-11-21T04:26:13,915 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0010/container_1732162945431_0010_01_000001/container_tokens] 2024-11-21T04:26:13,915 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_16140452/yarn-5853840925/MiniMRCluster_16140452-localDir-nm-0_1/usercache/jenkins/appcache/application_1732162945431_0010/container_1732162945431_0010_01_000001/sysfs] 2024-11-21T04:26:15,088 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:26:16,350 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T04:26:17,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-21T04:26:23,062 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:26:27,021 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@410c57a8{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-21T04:26:27,021 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b981737{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T04:26:27,022 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T04:26:27,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fc26d2b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-21T04:26:27,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c2b2bf3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,STOPPED} 2024-11-21T04:26:44,037 ERROR [Thread[Thread-404,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-21T04:26:44,038 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@40dd878e{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-21T04:26:44,039 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6586f66b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T04:26:44,039 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T04:26:44,040 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fa2fe52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-21T04:26:44,040 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@779c58eb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,STOPPED} 2024-11-21T04:26:44,043 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-11-21T04:26:44,047 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-21T04:26:44,047 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-21T04:26:44,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741830_1006 (size=969145) 2024-11-21T04:26:44,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741830_1006 (size=969145) 2024-11-21T04:26:44,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741830_1006 (size=969145) 2024-11-21T04:26:44,053 ERROR [Thread[Thread-422,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-21T04:26:44,056 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b1ca313{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-21T04:26:44,056 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@32a9ec01{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T04:26:44,056 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T04:26:44,056 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@52bb7a0a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-21T04:26:44,056 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@753579c8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,STOPPED} 2024-11-21T04:26:44,058 ERROR [Thread[Thread-386,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-21T04:26:44,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-21T04:26:44,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T04:26:44,058 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T04:26:44,058 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T04:26:44,058 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:26:44,058 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:26:44,058 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
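The call stack above shows where the shutdown is driven from: TestExportSnapshot.tearDownAfterClass calls HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection. A minimal JUnit 4 sketch of that teardown pattern follows; only shutdownMiniCluster and the utility class come from the stack trace, the mapreduce shutdown call is inferred from the "Stopping mini mapreduce cluster" message, and the class name, field name and startup arguments are illustrative assumptions.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class ExportSnapshotTeardownSketch {
      // Illustrative field; the real test keeps its own HBaseTestingUtil instance.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        TEST_UTIL.startMiniCluster(3); // three region servers, as in this log
      }

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        TEST_UTIL.shutdownMiniMapReduceCluster(); // "Stopping mini mapreduce cluster..."
        TEST_UTIL.shutdownMiniCluster();          // "Shutting down minicluster"
      }
    }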
2024-11-21T04:26:44,058 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T04:26:44,059 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=535783467, stopped=false 2024-11-21T04:26:44,059 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:44,059 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-21T04:26:44,059 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=fdc0a393186c,39577,1732162937127 2024-11-21T04:26:44,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T04:26:44,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T04:26:44,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T04:26:44,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-21T04:26:44,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:26:44,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:26:44,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:26:44,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:26:44,115 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T04:26:44,117 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T04:26:44,117 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T04:26:44,118 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T04:26:44,119 INFO 
[Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T04:26:44,119 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T04:26:44,119 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:26:44,119 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-21T04:26:44,120 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fdc0a393186c,39347,1732162938035' ***** 2024-11-21T04:26:44,120 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:44,120 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T04:26:44,120 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fdc0a393186c,34363,1732162938185' ***** 2024-11-21T04:26:44,120 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor 
org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:44,121 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T04:26:44,121 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fdc0a393186c,42393,1732162938268' ***** 2024-11-21T04:26:44,121 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T04:26:44,121 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:44,121 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T04:26:44,121 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T04:26:44,121 INFO [RS:0;fdc0a393186c:39347 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T04:26:44,121 INFO [RS:1;fdc0a393186c:34363 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T04:26:44,121 INFO [RS:0;fdc0a393186c:39347 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T04:26:44,121 INFO [RS:1;fdc0a393186c:34363 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T04:26:44,121 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T04:26:44,121 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T04:26:44,121 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(959): stopping server fdc0a393186c,34363,1732162938185 2024-11-21T04:26:44,121 INFO [RS:1;fdc0a393186c:34363 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T04:26:44,121 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T04:26:44,121 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(3091): Received CLOSE for fef81f9296f7039a4489a2709be189d3 2024-11-21T04:26:44,121 INFO [RS:1;fdc0a393186c:34363 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;fdc0a393186c:34363. 2024-11-21T04:26:44,122 INFO [RS:2;fdc0a393186c:42393 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T04:26:44,122 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T04:26:44,122 INFO [RS:2;fdc0a393186c:42393 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
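The ZooKeeper events a little earlier record the master and every region server seeing /hbase/running being deleted, which is the cluster-wide shutdown signal, and then re-setting a watch on the now-absent znode ("Set watcher on znode that does not yet exist"). The following is a bare-bones, hypothetical watcher for that znode written directly against the ZooKeeper client API; it is not HBase's ZKWatcher, and the quorum string is simply the one printed in these entries.

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningNodeWatcher implements Watcher {
      private static final String RUNNING_ZNODE = "/hbase/running";
      private final ZooKeeper zk;

      RunningNodeWatcher(String quorum) throws Exception {
        this.zk = new ZooKeeper(quorum, 30_000, this);
        watch();
      }

      // exists() sets a watch whether or not the znode is currently present,
      // matching the "Set watcher on znode that does not yet exist" entries above.
      private void watch() throws KeeperException, InterruptedException {
        zk.exists(RUNNING_ZNODE, this);
      }

      @Override
      public void process(WatchedEvent event) {
        if (RUNNING_ZNODE.equals(event.getPath())
            && event.getType() == Event.EventType.NodeDeleted) {
          System.out.println("Cluster shutdown requested: " + RUNNING_ZNODE + " was deleted");
        }
        try {
          watch(); // ZooKeeper watches are one-shot; re-register after every event
        } catch (KeeperException | InterruptedException e) {
          System.err.println("Could not re-set watch: " + e);
        }
      }

      public static void main(String[] args) throws Exception {
        new RunningNodeWatcher("127.0.0.1:60527"); // quorum taken from the log
        Thread.sleep(Long.MAX_VALUE);
      }
    }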
2024-11-21T04:26:44,122 DEBUG [RS:1;fdc0a393186c:34363 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T04:26:44,122 DEBUG [RS:1;fdc0a393186c:34363 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:26:44,122 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(3091): Received CLOSE for fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:26:44,122 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(959): stopping server fdc0a393186c,39347,1732162938035 2024-11-21T04:26:44,122 INFO [RS:0;fdc0a393186c:39347 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T04:26:44,122 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(3091): Received CLOSE for 796b8a6daae5bbdc223e78db15304576 2024-11-21T04:26:44,122 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T04:26:44,122 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(959): stopping server fdc0a393186c,42393,1732162938268 2024-11-21T04:26:44,122 INFO [RS:0;fdc0a393186c:39347 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;fdc0a393186c:39347. 2024-11-21T04:26:44,122 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T04:26:44,122 INFO [RS:2;fdc0a393186c:42393 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T04:26:44,122 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T04:26:44,122 DEBUG [RS:0;fdc0a393186c:39347 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T04:26:44,122 INFO [RS:2;fdc0a393186c:42393 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;fdc0a393186c:42393. 2024-11-21T04:26:44,122 DEBUG [RS:0;fdc0a393186c:39347 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:26:44,122 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T04:26:44,122 DEBUG [RS:2;fdc0a393186c:42393 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T04:26:44,122 DEBUG [RS:2;fdc0a393186c:42393 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:26:44,122 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T04:26:44,122 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 
2024-11-21T04:26:44,122 DEBUG [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(1325): Online Regions={fa291fa3994bf7902442d1d129cd311c=testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c., 796b8a6daae5bbdc223e78db15304576=hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576.} 2024-11-21T04:26:44,122 DEBUG [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(1325): Online Regions={fef81f9296f7039a4489a2709be189d3=testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3.} 2024-11-21T04:26:44,122 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing fef81f9296f7039a4489a2709be189d3, disabling compactions & flushes 2024-11-21T04:26:44,122 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T04:26:44,122 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:26:44,122 DEBUG [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T04:26:44,122 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:26:44,122 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. after waiting 0 ms 2024-11-21T04:26:44,123 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 
2024-11-21T04:26:44,123 DEBUG [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(1351): Waiting on 796b8a6daae5bbdc223e78db15304576, fa291fa3994bf7902442d1d129cd311c 2024-11-21T04:26:44,123 DEBUG [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(1351): Waiting on fef81f9296f7039a4489a2709be189d3 2024-11-21T04:26:44,123 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T04:26:44,123 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T04:26:44,123 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T04:26:44,123 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T04:26:44,123 DEBUG [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T04:26:44,123 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T04:26:44,123 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing fa291fa3994bf7902442d1d129cd311c, disabling compactions & flushes 2024-11-21T04:26:44,123 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 2024-11-21T04:26:44,123 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 2024-11-21T04:26:44,123 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. after waiting 0 ms 2024-11-21T04:26:44,123 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=69.66 KB heapSize=111.04 KB 2024-11-21T04:26:44,123 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 
2024-11-21T04:26:44,123 INFO [regionserver/fdc0a393186c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T04:26:44,123 INFO [regionserver/fdc0a393186c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T04:26:44,123 INFO [regionserver/fdc0a393186c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T04:26:44,123 INFO [regionserver/fdc0a393186c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T04:26:44,127 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fef81f9296f7039a4489a2709be189d3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T04:26:44,127 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:44,127 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:26:44,128 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for fef81f9296f7039a4489a2709be189d3: Waiting for close lock at 1732163204122Running coprocessor pre-close hooks at 1732163204122Disabling compacts and flushes for region at 1732163204122Disabling writes for close at 1732163204122Writing region close event to WAL at 1732163204123 (+1 ms)Running coprocessor post-close hooks at 1732163204127 (+4 ms)Closed at 1732163204127 2024-11-21T04:26:44,128 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1732163074773.fef81f9296f7039a4489a2709be189d3. 2024-11-21T04:26:44,128 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/default/testExportExpiredSnapshot/fa291fa3994bf7902442d1d129cd311c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T04:26:44,129 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:44,129 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 
2024-11-21T04:26:44,129 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for fa291fa3994bf7902442d1d129cd311c: Waiting for close lock at 1732163204123Running coprocessor pre-close hooks at 1732163204123Disabling compacts and flushes for region at 1732163204123Disabling writes for close at 1732163204123Writing region close event to WAL at 1732163204123Running coprocessor post-close hooks at 1732163204129 (+6 ms)Closed at 1732163204129 2024-11-21T04:26:44,129 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c. 2024-11-21T04:26:44,129 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 796b8a6daae5bbdc223e78db15304576, disabling compactions & flushes 2024-11-21T04:26:44,129 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 2024-11-21T04:26:44,129 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 2024-11-21T04:26:44,129 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. after waiting 0 ms 2024-11-21T04:26:44,129 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 
2024-11-21T04:26:44,129 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 796b8a6daae5bbdc223e78db15304576 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-11-21T04:26:44,138 INFO [regionserver/fdc0a393186c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T04:26:44,144 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/acl/796b8a6daae5bbdc223e78db15304576/.tmp/l/25eb946ab97d4e18849ae8dd77ae1918 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1732163072376/DeleteFamily/seqid=0 2024-11-21T04:26:44,148 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/info/fba58037d9844d1dbe0faca6312a211e is 173, key is testExportExpiredSnapshot,1,1732163074773.fa291fa3994bf7902442d1d129cd311c./info:regioninfo/1732163075133/Put/seqid=0 2024-11-21T04:26:44,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742347_1523 (size=5695) 2024-11-21T04:26:44,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742347_1523 (size=5695) 2024-11-21T04:26:44,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742347_1523 (size=5695) 2024-11-21T04:26:44,151 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/acl/796b8a6daae5bbdc223e78db15304576/.tmp/l/25eb946ab97d4e18849ae8dd77ae1918 2024-11-21T04:26:44,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742348_1524 (size=14362) 2024-11-21T04:26:44,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742348_1524 (size=14362) 2024-11-21T04:26:44,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742348_1524 (size=14362) 2024-11-21T04:26:44,153 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=59.12 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/info/fba58037d9844d1dbe0faca6312a211e 2024-11-21T04:26:44,155 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 25eb946ab97d4e18849ae8dd77ae1918 2024-11-21T04:26:44,156 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/acl/796b8a6daae5bbdc223e78db15304576/.tmp/l/25eb946ab97d4e18849ae8dd77ae1918 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/acl/796b8a6daae5bbdc223e78db15304576/l/25eb946ab97d4e18849ae8dd77ae1918 2024-11-21T04:26:44,159 INFO [regionserver/fdc0a393186c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T04:26:44,160 INFO [regionserver/fdc0a393186c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T04:26:44,164 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 25eb946ab97d4e18849ae8dd77ae1918 2024-11-21T04:26:44,164 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/acl/796b8a6daae5bbdc223e78db15304576/l/25eb946ab97d4e18849ae8dd77ae1918, entries=12, sequenceid=27, filesize=5.6 K 2024-11-21T04:26:44,165 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 796b8a6daae5bbdc223e78db15304576 in 35ms, sequenceid=27, compaction requested=false 2024-11-21T04:26:44,168 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/acl/796b8a6daae5bbdc223e78db15304576/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-11-21T04:26:44,168 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:44,168 INFO [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 2024-11-21T04:26:44,168 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 796b8a6daae5bbdc223e78db15304576: Waiting for close lock at 1732163204129Running coprocessor pre-close hooks at 1732163204129Disabling compacts and flushes for region at 1732163204129Disabling writes for close at 1732163204129Obtaining lock to block concurrent updates at 1732163204129Preparing flush snapshotting stores in 796b8a6daae5bbdc223e78db15304576 at 1732163204129Finished memstore snapshotting hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576., syncing WAL and waiting on mvcc, flushsize=dataSize=1412, getHeapSize=3392, getOffHeapSize=0, getCellsCount=23 at 1732163204129Flushing stores of hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 
at 1732163204130 (+1 ms)Flushing 796b8a6daae5bbdc223e78db15304576/l: creating writer at 1732163204130Flushing 796b8a6daae5bbdc223e78db15304576/l: appending metadata at 1732163204144 (+14 ms)Flushing 796b8a6daae5bbdc223e78db15304576/l: closing flushed file at 1732163204144Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f3c7655: reopening flushed file at 1732163204156 (+12 ms)Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 796b8a6daae5bbdc223e78db15304576 in 35ms, sequenceid=27, compaction requested=false at 1732163204165 (+9 ms)Writing region close event to WAL at 1732163204165Running coprocessor post-close hooks at 1732163204168 (+3 ms)Closed at 1732163204168 2024-11-21T04:26:44,168 DEBUG [RS_CLOSE_REGION-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1732162941451.796b8a6daae5bbdc223e78db15304576. 2024-11-21T04:26:44,172 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/ns/30fb3bc7a30243c7aa72ed951f87377d is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a./ns:/1732163072400/DeleteFamily/seqid=0 2024-11-21T04:26:44,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742349_1525 (size=7779) 2024-11-21T04:26:44,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742349_1525 (size=7779) 2024-11-21T04:26:44,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742349_1525 (size=7779) 2024-11-21T04:26:44,180 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.23 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/ns/30fb3bc7a30243c7aa72ed951f87377d 2024-11-21T04:26:44,197 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/rep_barrier/58a834852ed24fd48c39869c11debb3e is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a./rep_barrier:/1732163072400/DeleteFamily/seqid=0 2024-11-21T04:26:44,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742350_1526 (size=8005) 2024-11-21T04:26:44,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742350_1526 (size=8005) 2024-11-21T04:26:44,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742350_1526 (size=8005) 2024-11-21T04:26:44,202 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=199 (bloomFilter=true), 
to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/rep_barrier/58a834852ed24fd48c39869c11debb3e 2024-11-21T04:26:44,220 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/table/fbd3ef2dc87b45c5bb532582820b4504 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732163054742.263f5d8fad900dc5a7f481f25a73636a./table:/1732163072400/DeleteFamily/seqid=0 2024-11-21T04:26:44,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742351_1527 (size=8758) 2024-11-21T04:26:44,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742351_1527 (size=8758) 2024-11-21T04:26:44,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742351_1527 (size=8758) 2024-11-21T04:26:44,225 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.97 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/table/fbd3ef2dc87b45c5bb532582820b4504 2024-11-21T04:26:44,229 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/info/fba58037d9844d1dbe0faca6312a211e as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/info/fba58037d9844d1dbe0faca6312a211e 2024-11-21T04:26:44,234 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/info/fba58037d9844d1dbe0faca6312a211e, entries=74, sequenceid=199, filesize=14.0 K 2024-11-21T04:26:44,234 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/ns/30fb3bc7a30243c7aa72ed951f87377d as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/ns/30fb3bc7a30243c7aa72ed951f87377d 2024-11-21T04:26:44,239 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/ns/30fb3bc7a30243c7aa72ed951f87377d, entries=23, sequenceid=199, filesize=7.6 K 2024-11-21T04:26:44,240 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/rep_barrier/58a834852ed24fd48c39869c11debb3e as 
hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/rep_barrier/58a834852ed24fd48c39869c11debb3e 2024-11-21T04:26:44,245 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/rep_barrier/58a834852ed24fd48c39869c11debb3e, entries=21, sequenceid=199, filesize=7.8 K 2024-11-21T04:26:44,245 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/.tmp/table/fbd3ef2dc87b45c5bb532582820b4504 as hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/table/fbd3ef2dc87b45c5bb532582820b4504 2024-11-21T04:26:44,249 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/table/fbd3ef2dc87b45c5bb532582820b4504, entries=36, sequenceid=199, filesize=8.6 K 2024-11-21T04:26:44,250 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 127ms, sequenceid=199, compaction requested=false 2024-11-21T04:26:44,255 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/data/hbase/meta/1588230740/recovered.edits/202.seqid, newMaxSeqId=202, maxSeqId=1 2024-11-21T04:26:44,255 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:44,255 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T04:26:44,255 INFO [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T04:26:44,255 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732163204122Running coprocessor pre-close hooks at 1732163204122Disabling compacts and flushes for region at 1732163204122Disabling writes for close at 1732163204123 (+1 ms)Obtaining lock to block concurrent updates at 1732163204123Preparing flush snapshotting stores in 1588230740 at 1732163204123Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=71334, getHeapSize=113640, getOffHeapSize=0, getCellsCount=548 at 1732163204123Flushing stores of hbase:meta,,1.1588230740 at 1732163204124 (+1 ms)Flushing 1588230740/info: creating writer at 1732163204124Flushing 1588230740/info: appending metadata at 1732163204148 (+24 ms)Flushing 1588230740/info: closing flushed file at 1732163204148Flushing 1588230740/ns: creating writer at 1732163204157 (+9 ms)Flushing 1588230740/ns: appending metadata at 
1732163204171 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732163204172 (+1 ms)Flushing 1588230740/rep_barrier: creating writer at 1732163204184 (+12 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732163204196 (+12 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732163204196Flushing 1588230740/table: creating writer at 1732163204206 (+10 ms)Flushing 1588230740/table: appending metadata at 1732163204220 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732163204220Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@255467d: reopening flushed file at 1732163204228 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ce4cbc1: reopening flushed file at 1732163204234 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f8aec1b: reopening flushed file at 1732163204239 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3bbc6c1c: reopening flushed file at 1732163204245 (+6 ms)Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 127ms, sequenceid=199, compaction requested=false at 1732163204250 (+5 ms)Writing region close event to WAL at 1732163204252 (+2 ms)Running coprocessor post-close hooks at 1732163204255 (+3 ms)Closed at 1732163204255 2024-11-21T04:26:44,255 DEBUG [RS_CLOSE_META-regionserver/fdc0a393186c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T04:26:44,323 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(976): stopping server fdc0a393186c,42393,1732162938268; all regions closed. 2024-11-21T04:26:44,323 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(976): stopping server fdc0a393186c,39347,1732162938035; all regions closed. 2024-11-21T04:26:44,323 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(976): stopping server fdc0a393186c,34363,1732162938185; all regions closed. 
2024-11-21T04:26:44,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741833_1009 (size=9420) 2024-11-21T04:26:44,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741833_1009 (size=9420) 2024-11-21T04:26:44,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741833_1009 (size=9420) 2024-11-21T04:26:44,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741835_1011 (size=15851) 2024-11-21T04:26:44,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741836_1012 (size=81723) 2024-11-21T04:26:44,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741836_1012 (size=81723) 2024-11-21T04:26:44,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741835_1011 (size=15851) 2024-11-21T04:26:44,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741835_1011 (size=15851) 2024-11-21T04:26:44,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741836_1012 (size=81723) 2024-11-21T04:26:44,335 DEBUG [RS:0;fdc0a393186c:39347 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/oldWALs 2024-11-21T04:26:44,335 DEBUG [RS:1;fdc0a393186c:34363 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/oldWALs 2024-11-21T04:26:44,335 DEBUG [RS:2;fdc0a393186c:42393 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/oldWALs 2024-11-21T04:26:44,336 INFO [RS:0;fdc0a393186c:39347 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fdc0a393186c%2C39347%2C1732162938035:(num 1732162940308) 2024-11-21T04:26:44,336 INFO [RS:2;fdc0a393186c:42393 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fdc0a393186c%2C42393%2C1732162938268:(num 1732162940313) 2024-11-21T04:26:44,336 INFO [RS:1;fdc0a393186c:34363 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fdc0a393186c%2C34363%2C1732162938185.meta:.meta(num 1732162940941) 2024-11-21T04:26:44,336 DEBUG [RS:0;fdc0a393186c:39347 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:26:44,336 DEBUG [RS:2;fdc0a393186c:42393 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:26:44,336 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T04:26:44,336 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T04:26:44,336 INFO [RS:2;fdc0a393186c:42393 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T04:26:44,336 INFO [RS:0;fdc0a393186c:39347 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T04:26:44,336 INFO [RS:0;fdc0a393186c:39347 {}] hbase.ChoreService(370): Chore service for: regionserver/fdc0a393186c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, 
period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T04:26:44,336 INFO [RS:2;fdc0a393186c:42393 {}] hbase.ChoreService(370): Chore service for: regionserver/fdc0a393186c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T04:26:44,336 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T04:26:44,336 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T04:26:44,336 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T04:26:44,336 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T04:26:44,336 INFO [regionserver/fdc0a393186c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T04:26:44,336 INFO [regionserver/fdc0a393186c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T04:26:44,336 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T04:26:44,336 INFO [RS:2;fdc0a393186c:42393 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T04:26:44,336 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T04:26:44,336 INFO [RS:0;fdc0a393186c:39347 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T04:26:44,337 INFO [RS:0;fdc0a393186c:39347 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39347 2024-11-21T04:26:44,337 INFO [RS:2;fdc0a393186c:42393 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42393 2024-11-21T04:26:44,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073741834_1010 (size=11682) 2024-11-21T04:26:44,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073741834_1010 (size=11682) 2024-11-21T04:26:44,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073741834_1010 (size=11682) 2024-11-21T04:26:44,340 DEBUG [RS:1;fdc0a393186c:34363 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/oldWALs 2024-11-21T04:26:44,340 INFO [RS:1;fdc0a393186c:34363 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fdc0a393186c%2C34363%2C1732162938185:(num 1732162940303) 2024-11-21T04:26:44,340 DEBUG [RS:1;fdc0a393186c:34363 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T04:26:44,340 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T04:26:44,340 INFO [RS:1;fdc0a393186c:34363 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T04:26:44,340 INFO [RS:1;fdc0a393186c:34363 {}] hbase.ChoreService(370): Chore service for: regionserver/fdc0a393186c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T04:26:44,340 INFO [RS:1;fdc0a393186c:34363 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T04:26:44,340 INFO [regionserver/fdc0a393186c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T04:26:44,341 INFO [RS:1;fdc0a393186c:34363 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34363 2024-11-21T04:26:44,364 INFO [RS:0;fdc0a393186c:39347 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T04:26:44,364 INFO [RS:2;fdc0a393186c:42393 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T04:26:44,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fdc0a393186c,42393,1732162938268 2024-11-21T04:26:44,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fdc0a393186c,39347,1732162938035 2024-11-21T04:26:44,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-21T04:26:44,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fdc0a393186c,34363,1732162938185 2024-11-21T04:26:44,373 INFO [RS:1;fdc0a393186c:34363 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T04:26:44,382 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fdc0a393186c,39347,1732162938035] 2024-11-21T04:26:44,398 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fdc0a393186c,39347,1732162938035 already deleted, retry=false 2024-11-21T04:26:44,398 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fdc0a393186c,39347,1732162938035 expired; onlineServers=2 2024-11-21T04:26:44,398 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fdc0a393186c,34363,1732162938185] 2024-11-21T04:26:44,406 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fdc0a393186c,34363,1732162938185 already deleted, retry=false 2024-11-21T04:26:44,406 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fdc0a393186c,34363,1732162938185 expired; onlineServers=1 2024-11-21T04:26:44,406 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fdc0a393186c,42393,1732162938268] 2024-11-21T04:26:44,414 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fdc0a393186c,42393,1732162938268 already deleted, retry=false 2024-11-21T04:26:44,415 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fdc0a393186c,42393,1732162938268 expired; onlineServers=0 2024-11-21T04:26:44,415 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'fdc0a393186c,39577,1732162937127' ***** 
2024-11-21T04:26:44,415 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T04:26:44,416 INFO [M:0;fdc0a393186c:39577 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T04:26:44,416 INFO [M:0;fdc0a393186c:39577 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T04:26:44,416 DEBUG [M:0;fdc0a393186c:39577 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T04:26:44,417 DEBUG [M:0;fdc0a393186c:39577 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T04:26:44,417 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-21T04:26:44,417 DEBUG [master/fdc0a393186c:0:becomeActiveMaster-HFileCleaner.small.0-1732162939834 {}] cleaner.HFileCleaner(306): Exit Thread[master/fdc0a393186c:0:becomeActiveMaster-HFileCleaner.small.0-1732162939834,5,FailOnTimeoutGroup] 2024-11-21T04:26:44,417 DEBUG [master/fdc0a393186c:0:becomeActiveMaster-HFileCleaner.large.0-1732162939833 {}] cleaner.HFileCleaner(306): Exit Thread[master/fdc0a393186c:0:becomeActiveMaster-HFileCleaner.large.0-1732162939833,5,FailOnTimeoutGroup] 2024-11-21T04:26:44,418 INFO [M:0;fdc0a393186c:39577 {}] hbase.ChoreService(370): Chore service for: master/fdc0a393186c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T04:26:44,418 INFO [M:0;fdc0a393186c:39577 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T04:26:44,418 DEBUG [M:0;fdc0a393186c:39577 {}] master.HMaster(1795): Stopping service threads 2024-11-21T04:26:44,418 INFO [M:0;fdc0a393186c:39577 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T04:26:44,419 INFO [M:0;fdc0a393186c:39577 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T04:26:44,419 INFO [M:0;fdc0a393186c:39577 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T04:26:44,420 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-21T04:26:44,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-21T04:26:44,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-21T04:26:44,423 DEBUG [M:0;fdc0a393186c:39577 {}] zookeeper.ZKUtil(347): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-21T04:26:44,423 WARN [M:0;fdc0a393186c:39577 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T04:26:44,424 INFO [M:0;fdc0a393186c:39577 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/.lastflushedseqids 2024-11-21T04:26:44,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42055 is added to blk_1073742352_1528 (size=329) 2024-11-21T04:26:44,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38043 is added to blk_1073742352_1528 (size=329) 2024-11-21T04:26:44,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33643 is added to blk_1073742352_1528 (size=329) 2024-11-21T04:26:44,439 INFO [M:0;fdc0a393186c:39577 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T04:26:44,440 INFO [M:0;fdc0a393186c:39577 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T04:26:44,440 DEBUG [M:0;fdc0a393186c:39577 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T04:26:44,452 INFO [M:0;fdc0a393186c:39577 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T04:26:44,452 DEBUG [M:0;fdc0a393186c:39577 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T04:26:44,452 DEBUG [M:0;fdc0a393186c:39577 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T04:26:44,452 DEBUG [M:0;fdc0a393186c:39577 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T04:26:44,452 INFO [M:0;fdc0a393186c:39577 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=823.96 KB heapSize=988.77 KB
2024-11-21T04:26:44,452 ERROR [AsyncFSWAL-0-hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData-prefix:fdc0a393186c,39577,1732162937127 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData-prefix:fdc0a393186c,39577,1732162937127,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T04:26:44,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T04:26:44,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T04:26:44,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39347-0x1015cea71210001, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T04:26:44,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42393-0x1015cea71210003, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T04:26:44,482 INFO [RS:2;fdc0a393186c:42393 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-21T04:26:44,482 INFO [RS:0;fdc0a393186c:39347 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-21T04:26:44,483 INFO [RS:2;fdc0a393186c:42393 {}] regionserver.HRegionServer(1031): Exiting; stopping=fdc0a393186c,42393,1732162938268; zookeeper connection closed.
2024-11-21T04:26:44,483 INFO [RS:0;fdc0a393186c:39347 {}] regionserver.HRegionServer(1031): Exiting; stopping=fdc0a393186c,39347,1732162938035; zookeeper connection closed.
2024-11-21T04:26:44,483 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1f84696f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1f84696f
2024-11-21T04:26:44,483 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6c984934 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6c984934
2024-11-21T04:26:44,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T04:26:44,490 INFO [RS:1;fdc0a393186c:34363 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-21T04:26:44,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34363-0x1015cea71210002, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-21T04:26:44,490 INFO [RS:1;fdc0a393186c:34363 {}] regionserver.HRegionServer(1031): Exiting; stopping=fdc0a393186c,34363,1732162938185; zookeeper connection closed.
2024-11-21T04:26:44,490 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@77e92e1b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@77e92e1b
2024-11-21T04:26:44,490 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-21T04:26:46,350 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T04:26:47,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:47,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T04:26:47,560 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T04:26:47,562 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-21T04:26:47,563 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-21T04:26:47,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:47,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-21T04:26:47,564 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-21T04:26:49,652 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-21T04:27:16,351 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T04:27:18,425 DEBUG [master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-11-21T04:27:18,425 DEBUG [master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-21T04:27:26,546 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;fdc0a393186c:39577 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 14 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@3700b8b5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58080c77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3367 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.CountDownLatch$Sync@54558e61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11077 Waited count: 11571 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 
(org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@5c07e7ee Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@447423ad Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@1f8d300d-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44301}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 37 Waited count: 2855 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d5da314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44067): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 
Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 32609 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1208 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@746c915 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44067): State: TIMED_WAITING Blocked count: 114 Waited count: 2031 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44067): State: TIMED_WAITING Blocked count: 91 Waited count: 2016 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44067): State: TIMED_WAITING Blocked count: 112 Waited count: 2037 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44067): State: TIMED_WAITING Blocked count: 102 Waited count: 2035 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44067): State: TIMED_WAITING Blocked count: 94 Waited count: 2024 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 166 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1812730390)): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp350088835-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp350088835-88-acceptor-0@60ab5dc9-ServerConnector@26b3760d{HTTP/1.1, (http/1.1)}{localhost:45091}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp350088835-89): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp350088835-90): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-edcf1f6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@724f8cec): State: TIMED_WAITING Blocked count: 0 Waited count: 660 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 36971): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 270 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a43b7bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1209 Waited count: 1302 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5d4816ad): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 331 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 331 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 331 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 331 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (625843813) connection to localhost/127.0.0.1:44067 from jenkins): State: TIMED_WAITING Blocked count: 1089 Waited count: 1090 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1533570740-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1533570740-122-acceptor-0@515ec047-ServerConnector@2f8fc5e5{HTTP/1.1, (http/1.1)}{localhost:39919}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (IPC Parameter Sending Thread for localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 0 Waited count: 1739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1533570740-123): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1533570740-124): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-3f2947df-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3563764c): State: TIMED_WAITING Blocked count: 0 Waited count: 660 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 37319): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 278 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36855e10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1274 Waited count: 1303 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1a7c72f1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 331 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 333 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 349 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 347 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1278567483-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1278567483-154-acceptor-0@4022ab3e-ServerConnector@1ec9a36b{HTTP/1.1, (http/1.1)}{localhost:43805}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1278567483-155): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1278567483-156): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-51f35b56-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@303700ae): State: TIMED_WAITING Blocked count: 0 Waited count: 659 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 46183): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 252 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@348462c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1189 Waited count: 1285 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@213622aa): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 343 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 334 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (ForkJoinPool-2-worker-3): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.ForkJoinPool@2f435c5a Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 209 (ForkJoinPool-2-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 215 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@38dc0a4e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@1e899c91[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@a38f3a0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60527): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 165 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 309 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@efded71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:60527):): State: WAITING Blocked count: 0 Waited count: 401 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6967c2f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 434 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f457e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 249 (LeaseRenewer:jenkins@localhost:44067): State: TIMED_WAITING Blocked count: 8 Waited count: 340 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7ccd7a57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 310 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: 
RUNNABLE Blocked count: 16 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:60527)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 22 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b3ea3ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 64 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@421a6557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 6 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5592a081 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577): State: WAITING Blocked count: 117 Waited count: 417 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d3a4ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577): State: WAITING Blocked count: 99 Waited count: 440 Waiting on java.util.concurrent.Semaphore$NonfairSync@53e5aa46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577): State: WAITING Blocked count: 32 Waited count: 6910 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42c44db0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39577): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2927bc1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2927bc1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7db992e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@370a089 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@63b9d49a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@72e4f75f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43fb4563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 315 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 337 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 82 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;fdc0a393186c:39577): State: TIMED_WAITING Blocked count: 12 Waited count: 2671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1103/0x00007fee20f95478.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 360 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 366 (org.apache.hadoop.hdfs.PeerCache@484ad824): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 385 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3253 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 402 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 89 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 32465 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 437 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 438 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 461 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@96f57ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 483 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@209430a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 487 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ca79557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6625af1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 513 (LeaseRenewer:jenkins.hfs.1@localhost:44067): State: TIMED_WAITING Blocked count: 7 Waited count: 336 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Inactive Thread 517 (LeaseRenewer:jenkins.hfs.2@localhost:44067): State: TIMED_WAITING Blocked count: 8 Waited count: 337 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 32288 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 540 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 566 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 313 Waiting on java.util.concurrent.ForkJoinPool@1494f5e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 593 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 993 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 385 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1056 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3fefdbd7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1146 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1252 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 435 Waiting on java.util.concurrent.ForkJoinPool@1494f5e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1471 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting 
on java.util.TaskQueue@dee3101 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1637 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2783 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2784 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4493 
(RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4494 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4495 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8077 (AsyncFSWAL-1-hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData-prefix:fdc0a393186c,39577,1732162937127): State: WAITING 
Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ee6f6b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8080 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-21T04:27:46,351 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T04:28:16,351 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
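The two DEBUG records above come from HBaseTestingUtil's FsDatasetAsyncDiskServiceFixer, which (per HBASE-27595) presumably probes a Hadoop-internal "threadGroup" field by reflection and merely logs when that field no longer exists on newer Hadoop versions. The following is an illustrative sketch of that reflective-probe pattern only, not the actual HBase code; the target object here is a generic stand-in for the Hadoop internals.

```java
import java.lang.reflect.Field;

// Illustrative sketch: a reflective probe in the spirit of the
// FsDatasetAsyncDiskServiceFixer messages in the log above.
public class ThreadGroupFieldProbe {

    static ThreadGroup tryReadThreadGroup(Object target) {
        try {
            Field f = target.getClass().getDeclaredField("threadGroup");
            f.setAccessible(true);
            return (ThreadGroup) f.get(target);
        } catch (NoSuchFieldException e) {
            // Field absent (e.g. on a newer Hadoop): note it and carry on,
            // analogous to the DEBUG lines seen in this log.
            System.out.println("NoSuchFieldException: threadGroup; "
                + "see HBASE-27595 for details.");
            return null;
        } catch (IllegalAccessException e) {
            throw new IllegalStateException("field present but not readable", e);
        }
    }

    public static void main(String[] args) {
        // Probing an object without such a field exercises the DEBUG-message path.
        System.out.println(tryReadThreadGroup(new Object()));
    }
}
```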
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;fdc0a393186c:39577 224 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 14 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@3700b8b5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58080c77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3967 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.CountDownLatch$Sync@45ed6dcd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11077 Waited count: 11572 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@5c07e7ee Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@447423ad Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 787 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@1f8d300d-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44301}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 37 Waited count: 2855 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d5da314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44067): State: TIMED_WAITING Blocked count: 1 Waited 
count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 132 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 38536 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1208 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@746c915 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44067): State: TIMED_WAITING Blocked count: 114 Waited count: 2091 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44067): State: TIMED_WAITING Blocked count: 93 Waited count: 2077 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44067): State: TIMED_WAITING Blocked count: 112 Waited count: 2097 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44067): State: TIMED_WAITING Blocked count: 102 Waited count: 2095 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44067): State: TIMED_WAITING Blocked count: 97 Waited count: 2085 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 196 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1812730390)): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp350088835-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp350088835-88-acceptor-0@60ab5dc9-ServerConnector@26b3760d{HTTP/1.1, (http/1.1)}{localhost:45091}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp350088835-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp350088835-90): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-edcf1f6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
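Each entry in this dump (thread id and name, State, Blocked count, Waited count, the monitor or lock being waited on, and the stack) maps directly onto java.lang.management.ThreadInfo; the Time-limited test thread earlier in the dump is itself inside sun.management.ThreadImpl.getThreadInfo while printing it. Below is a minimal sketch of producing a dump in this shape on a 60-second cadence using plain ThreadMXBean, assumed here for illustration rather than HBase's ReflectionUtils.printThreadInfo helper.

```java
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class PeriodicThreadDump {
    public static void main(String[] args) throws InterruptedException {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        while (true) {
            // Snapshot every live thread with up to 64 stack frames each.
            ThreadInfo[] infos = mx.getThreadInfo(mx.getAllThreadIds(), 64);
            System.out.println("Process Thread Dump: " + infos.length + " threads");
            for (ThreadInfo info : infos) {
                if (info == null) continue; // thread exited since getAllThreadIds()
                System.out.println("Thread " + info.getThreadId()
                    + " (" + info.getThreadName() + "): State: " + info.getThreadState()
                    + " Blocked count: " + info.getBlockedCount()
                    + " Waited count: " + info.getWaitedCount());
                if (info.getLockName() != null) {
                    System.out.println("  Waiting on " + info.getLockName());
                }
                System.out.println("  Stack:");
                for (StackTraceElement frame : info.getStackTrace()) {
                    System.out.println("    " + frame);
                }
            }
            Thread.sleep(60_000L); // repeat the dump every 60 seconds
        }
    }
}
```

The Blocked count and Waited count values are cumulative for the lifetime of each thread, which is why long-lived pool and IPC handler threads in this dump show large numbers.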
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@724f8cec): State: TIMED_WAITING Blocked count: 0 Waited count: 780 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 36971): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 290 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a43b7bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1230 Waited count: 1343 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5d4816ad): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (625843813) connection to localhost/127.0.0.1:44067 from jenkins): State: TIMED_WAITING Blocked count: 1136 Waited count: 1137 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1533570740-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1533570740-122-acceptor-0@515ec047-ServerConnector@2f8fc5e5{HTTP/1.1, (http/1.1)}{localhost:39919}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (IPC Parameter Sending Thread for localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 0 Waited count: 1794 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1533570740-123): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1533570740-124): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-3f2947df-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3563764c): State: TIMED_WAITING Blocked count: 0 Waited count: 780 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 37319): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 298 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36855e10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1294 Waited count: 1347 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1a7c72f1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 393 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 409 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 407 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1278567483-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1278567483-154-acceptor-0@4022ab3e-ServerConnector@1ec9a36b{HTTP/1.1, (http/1.1)}{localhost:43805}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1278567483-155): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1278567483-156): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-51f35b56-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@303700ae): State: TIMED_WAITING Blocked count: 0 Waited count: 779 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 46183): State: TIMED_WAITING Blocked count: 1 Waited count: 40 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 78 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 272 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@348462c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1209 Waited count: 1325 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@213622aa): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 403 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 396 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 404 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 394 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (ForkJoinPool-2-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 215 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@38dc0a4e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@1e899c91[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 223 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@a38f3a0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60527): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 195 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 314 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@efded71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:60527):): State: WAITING Blocked count: 0 Waited count: 406 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6967c2f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 439 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f457e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7ccd7a57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:60527)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 22 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b3ea3ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 64 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@421a6557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 81 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 82 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 
(NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5592a081 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577): State: WAITING Blocked count: 117 Waited count: 417 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d3a4ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577): State: WAITING Blocked count: 99 Waited count: 440 Waiting on java.util.concurrent.Semaphore$NonfairSync@53e5aa46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577): State: WAITING Blocked count: 32 Waited count: 6910 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42c44db0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39577): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2927bc1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2927bc1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7db992e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@370a089 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@63b9d49a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@72e4f75f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43fb4563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 315 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 337 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 82 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;fdc0a393186c:39577): State: TIMED_WAITING Blocked count: 12 Waited count: 2671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1103/0x00007fee20f95478.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 360 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 366 (org.apache.hadoop.hdfs.PeerCache@484ad824): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 385 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3852 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 402 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 89 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24fb068d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 38468 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 437 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 438 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 461 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@96f57ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 483 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@209430a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 487 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ca79557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6625af1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 38290 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 540 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 566 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 313 Waiting on java.util.concurrent.ForkJoinPool@1494f5e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 593 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 993 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1056 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3fefdbd7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1146 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1252 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 436 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1471 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@dee3101 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2783 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2784 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4493 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4494 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4495 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8077 (AsyncFSWAL-1-hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData-prefix:fdc0a393186c,39577,1732162937127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ee6f6b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8080 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-21T04:28:46,351 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T04:29:16,352 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
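The repeated FsDatasetAsyncDiskServiceFixer DEBUG line above is, per its own message, a reflective lookup of a field named threadGroup that Hadoop releases newer than 3.2.3 / 3.3.4 no longer declare (see HBASE-27595). As a minimal, self-contained sketch of that failure mode only, with hypothetical stand-in classes instead of the real FsDatasetAsyncDiskService and fixer code, the pattern looks roughly like this:

import java.lang.reflect.Field;

// Hypothetical stand-ins, not the real Hadoop/HBase classes:
// "LegacyAsyncDiskService" declares the private threadGroup field (older Hadoop),
// "CurrentAsyncDiskService" does not (newer Hadoop, per the DEBUG message above).
public class ThreadGroupFieldProbe {

    static class LegacyAsyncDiskService {
        private ThreadGroup threadGroup = new ThreadGroup("async disk service");
    }

    static class CurrentAsyncDiskService {
        // field removed, which is what triggers the logged NoSuchFieldException
    }

    static void probe(Class<?> clazz) {
        try {
            // Reflective lookup by name; succeeds only if the class still declares the field.
            Field f = clazz.getDeclaredField("threadGroup");
            f.setAccessible(true);
            System.out.println(clazz.getSimpleName() + ": found field of type "
                + f.getType().getSimpleName());
        } catch (NoSuchFieldException e) {
            // The fixer treats this as non-fatal and just logs it, hence the periodic DEBUG noise.
            System.out.println(clazz.getSimpleName() + ": NoSuchFieldException: " + e.getMessage());
        }
    }

    public static void main(String[] args) {
        probe(LegacyAsyncDiskService.class);   // prints: found field of type ThreadGroup
        probe(CurrentAsyncDiskService.class);  // prints: NoSuchFieldException: threadGroup
    }
}

Running the sketch prints the field type for the legacy stand-in and "NoSuchFieldException: threadGroup" for the current one, which mirrors why the DEBUG line recurs harmlessly every 30 seconds in the dump above.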
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;fdc0a393186c:39577 222 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 14 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@3700b8b5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58080c77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4566 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.CountDownLatch$Sync@39772a48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11077 Waited count: 11573 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@5c07e7ee Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@447423ad Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 907 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@1f8d300d-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44301}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 37 Waited count: 2855 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d5da314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44067): State: TIMED_WAITING Blocked count: 1 Waited 
count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 153 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 44464 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1208 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@746c915 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44067): State: TIMED_WAITING Blocked count: 120 Waited count: 2151 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44067): State: TIMED_WAITING Blocked count: 94 Waited count: 2137 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44067): State: TIMED_WAITING Blocked count: 114 Waited count: 2157 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44067): State: TIMED_WAITING Blocked count: 102 Waited count: 2155 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44067): State: TIMED_WAITING Blocked count: 108 Waited count: 2145 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 226 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1812730390)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp350088835-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp350088835-88-acceptor-0@60ab5dc9-ServerConnector@26b3760d{HTTP/1.1, (http/1.1)}{localhost:45091}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp350088835-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp350088835-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-edcf1f6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@724f8cec): State: TIMED_WAITING Blocked count: 0 Waited count: 900 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 36971): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 310 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a43b7bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1252 Waited count: 1398 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5d4816ad): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 451 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 451 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 453 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 451 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (625843813) connection to localhost/127.0.0.1:44067 from jenkins): State: TIMED_WAITING Blocked count: 1176 Waited count: 1177 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1533570740-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1533570740-122-acceptor-0@515ec047-ServerConnector@2f8fc5e5{HTTP/1.1, (http/1.1)}{localhost:39919}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (IPC Parameter Sending Thread for localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 0 Waited count: 1834 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1533570740-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1533570740-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-3f2947df-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3563764c): State: TIMED_WAITING Blocked count: 0 Waited count: 900 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 37319): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 318 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36855e10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1314 Waited count: 1392 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1a7c72f1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 451 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 453 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 469 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 467 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1278567483-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1278567483-154-acceptor-0@4022ab3e-ServerConnector@1ec9a36b{HTTP/1.1, (http/1.1)}{localhost:43805}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1278567483-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1278567483-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-51f35b56-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@303700ae): State: TIMED_WAITING Blocked count: 0 Waited count: 899 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 46183): State: TIMED_WAITING Blocked count: 1 Waited count: 46 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 292 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@348462c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1229 Waited count: 1365 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@213622aa): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 454 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-23-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@38dc0a4e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@1e899c91[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@a38f3a0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60527): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 225 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 318 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@efded71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 
cport:60527):): State: WAITING Blocked count: 0 Waited count: 410 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6967c2f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 443 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f457e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7ccd7a57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 366 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:60527)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 22 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b3ea3ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 64 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@421a6557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 6 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5592a081 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577): State: WAITING Blocked count: 117 Waited count: 417 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d3a4ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577): State: WAITING Blocked count: 99 Waited count: 440 Waiting on java.util.concurrent.Semaphore$NonfairSync@53e5aa46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577): State: WAITING Blocked count: 32 Waited count: 6910 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42c44db0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39577): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2927bc1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2927bc1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7db992e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@370a089 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@63b9d49a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@72e4f75f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43fb4563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 315 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 337 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 82 
Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;fdc0a393186c:39577): State: TIMED_WAITING Blocked count: 12 Waited count: 2671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1103/0x00007fee20f95478.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 360 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 366 (org.apache.hadoop.hdfs.PeerCache@484ad824): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 385 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4452 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 402 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 89 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24fb068d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44470 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 437 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 438 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 461 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@96f57ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 483 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@209430a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 487 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ca79557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6625af1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44293 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 540 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 566 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 593 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 993 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 397 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1056 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3fefdbd7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1146 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1471 (Container metrics 
unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@dee3101 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2783 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2784 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4493 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4494 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4495 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8077 (AsyncFSWAL-1-hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData-prefix:fdc0a393186c,39577,1732162937127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ee6f6b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8080 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-21T04:29:46,352 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T04:30:16,352 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;fdc0a393186c:39577 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) 
java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 14 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@3700b8b5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 26 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58080c77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5166 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 52 Waiting on java.util.concurrent.CountDownLatch$Sync@30ad18b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11077 Waited count: 11574 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) 
app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@5c07e7ee Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@447423ad Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 1027 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@1f8d300d-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44301}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 37 Waited count: 2855 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d5da314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44067): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 173 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 50393 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1208 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@746c915 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44067): State: TIMED_WAITING Blocked count: 120 Waited count: 2211 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44067): State: TIMED_WAITING Blocked count: 94 Waited count: 2197 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44067): State: TIMED_WAITING Blocked count: 114 Waited count: 2218 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44067): State: TIMED_WAITING Blocked count: 102 Waited count: 2215 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44067): State: TIMED_WAITING Blocked 
count: 108 Waited count: 2207 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 256 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1812730390)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 
(pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp350088835-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp350088835-88-acceptor-0@60ab5dc9-ServerConnector@26b3760d{HTTP/1.1, (http/1.1)}{localhost:45091}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp350088835-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp350088835-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-edcf1f6-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@724f8cec): State: TIMED_WAITING Blocked count: 0 Waited count: 1020 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 36971): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 330 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a43b7bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1272 Waited count: 1438 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5d4816ad): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 511 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 511 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 519 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 515 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 36971): State: TIMED_WAITING Blocked count: 0 Waited count: 511 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (625843813) connection to localhost/127.0.0.1:44067 from jenkins): State: TIMED_WAITING Blocked count: 1235 Waited count: 1236 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp1533570740-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1533570740-122-acceptor-0@515ec047-ServerConnector@2f8fc5e5{HTTP/1.1, (http/1.1)}{localhost:39919}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (IPC Parameter Sending Thread for localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 0 Waited count: 1894 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1533570740-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1533570740-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-3f2947df-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3563764c): State: TIMED_WAITING Blocked count: 0 Waited count: 1020 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 37319): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 338 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36855e10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1334 Waited count: 1432 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1a7c72f1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 511 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 529 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 37319): State: TIMED_WAITING Blocked count: 0 Waited count: 529 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp1278567483-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) 
app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp1278567483-154-acceptor-0@4022ab3e-ServerConnector@1ec9a36b{HTTP/1.1, (http/1.1)}{localhost:43805}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp1278567483-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp1278567483-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-51f35b56-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@303700ae): State: TIMED_WAITING Blocked count: 0 Waited count: 1019 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 46183): State: TIMED_WAITING Blocked count: 1 Waited count: 52 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 312 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@348462c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067): State: TIMED_WAITING Blocked count: 1249 Waited count: 1405 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@213622aa): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 516 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 46183): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@38dc0a4e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@1e899c91[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@a38f3a0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60527): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 255 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 322 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@efded71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:60527):): State: WAITING Blocked count: 0 Waited count: 414 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6967c2f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 447 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f457e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7ccd7a57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 394 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:60527)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 22 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b3ea3ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 64 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@421a6557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 6 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 82 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5592a081 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577): State: WAITING Blocked count: 117 Waited count: 417 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d3a4ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577): State: WAITING Blocked count: 99 Waited count: 440 Waiting on java.util.concurrent.Semaphore$NonfairSync@53e5aa46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577): State: WAITING Blocked count: 32 Waited count: 6910 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42c44db0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39577): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2927bc1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2927bc1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7db992e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@370a089 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@63b9d49a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@72e4f75f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43fb4563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 315 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 337 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 82 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;fdc0a393186c:39577): State: TIMED_WAITING Blocked count: 12 Waited count: 2671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1103/0x00007fee20f95478.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 360 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 366 (org.apache.hadoop.hdfs.PeerCache@484ad824): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 385 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5051 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 402 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 89 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24fb068d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50472 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 437 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 438 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 8 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 461 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@96f57ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 483 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@209430a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 487 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ca79557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6625af1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50295 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 540 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 593 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 993 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 403 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1056 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3fefdbd7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1146 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1471 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@dee3101 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2783 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2784 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4493 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4494 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4495 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8077 (AsyncFSWAL-1-hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData-prefix:fdc0a393186c,39577,1732162937127): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ee6f6b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8080 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-21T04:30:46,353 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T04:31:16,353 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
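
[Editor's note, not part of the captured log] The two DEBUG records above repeat 30 seconds apart while the cluster shuts down: HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer performs a reflective lookup of a field named "threadGroup" (presumably on a Hadoop-internal class, per HBASE-27595), and on newer Hadoop releases that field no longer exists, so the lookup throws NoSuchFieldException and the fixer only logs it at DEBUG level. The following is a minimal illustrative sketch of that reflective pattern, not HBase's actual code; the Target class is hypothetical and merely stands in for a class whose "threadGroup" field has been removed.

```java
import java.lang.reflect.Field;

/**
 * Illustrative sketch only: shows why a reflective field lookup produces a
 * "NoSuchFieldException: threadGroup" message when the target class no longer
 * declares that field. Target is a hypothetical stand-in, not a Hadoop class.
 */
public class NoSuchFieldSketch {

    // Hypothetical class that used to declare a "threadGroup" field but no longer does.
    static class Target {
        private final String name = "async disk service";
    }

    public static void main(String[] args) {
        try {
            // getDeclaredField throws NoSuchFieldException if the named field is absent.
            Field f = Target.class.getDeclaredField("threadGroup");
            f.setAccessible(true);
            System.out.println("found field: " + f);
        } catch (NoSuchFieldException e) {
            // Mirrors the periodic DEBUG record in the log above: the missing field
            // is treated as non-fatal and simply reported, then the check is skipped.
            System.out.println("NoSuchFieldException: " + e.getMessage()
                + "; field not present in this version, skipping fix");
        }
    }
}
```

Because the record is emitted at DEBUG, it is informational rather than a failure in its own right; the shutdown problems reported next in the log come from the WAL sync timeout, not from this message.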
2024-11-21T04:31:44,454 DEBUG [M:0;fdc0a393186c:39577 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732163204440Disabling compacts and flushes for region at 1732163204440Disabling writes for close at 1732163204452 (+12 ms)Obtaining lock to block concurrent updates at 1732163204452Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732163204452Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=843730, getHeapSize=1012440, getOffHeapSize=0, getCellsCount=2221 at 1732163204452Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1732163504454 (+300002 ms) 2024-11-21T04:31:44,454 WARN [M:0;fdc0a393186c:39577 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3824, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3824, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 
19 more 2024-11-21T04:31:44,459 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T04:31:44,461 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-21T04:31:44,462 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-21T04:31:44,462 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/WALs/fdc0a393186c,39577,1732162937127/fdc0a393186c%2C39577%2C1732162937127.1732162938960 2024-11-21T04:31:44,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/WALs/fdc0a393186c,39577,1732162937127/fdc0a393186c%2C39577%2C1732162937127.1732162938960 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T04:31:44,464 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T04:31:44,464 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/WALs/fdc0a393186c,39577,1732162937127/fdc0a393186c%2C39577%2C1732162937127.1732162938960 2024-11-21T04:31:44,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/WALs/fdc0a393186c,39577,1732162937127/fdc0a393186c%2C39577%2C1732162937127.1732162938960 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;fdc0a393186c:39577 224 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING 
Blocked count: 14 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@3700b8b5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58080c77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5765 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 58 Waiting on java.util.concurrent.CountDownLatch$Sync@5da203b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11077 Waited count: 11575 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 9 Waited count: 10 Waiting on java.lang.ref.ReferenceQueue$Lock@5c07e7ee Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@447423ad Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 1146 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@1f8d300d-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44301}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 37 Waited count: 2855 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d5da314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44067): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 193 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 115
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 47 (RedundancyMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 193
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344)
    java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 48 (MarkedDeleteBlockScrubberThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 56318
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 51 (Block report processor):
  State: WAITING
  Blocked count: 1
  Waited count: 1208
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@746c915
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614)
Thread 57 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 54 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 64 (IPC Server handler 0 on default port 44067):
  State: TIMED_WAITING
  Blocked count: 120
  Waited count: 2271
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 65 (IPC Server handler 1 on default port 44067):
  State: TIMED_WAITING
  Blocked count: 94
  Waited count: 2257
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 66 (IPC Server handler 2 on default port 44067):
  State: TIMED_WAITING
  Blocked count: 115
  Waited count: 2278
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 67 (IPC Server handler 3 on default port 44067):
  State: TIMED_WAITING
  Blocked count: 102
  Waited count: 2275
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 68 (IPC Server handler 4 on default port 44067):
  State: TIMED_WAITING
  Blocked count: 108
  Waited count: 2267
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 69 (pool-12-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 286
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 115
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 75 (CacheReplicationMonitor(1812730390)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 21
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186)
Thread 86 (pool-18-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 87 (qtp350088835-87):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 88 (qtp350088835-88-acceptor-0@60ab5dc9-ServerConnector@26b3760d{HTTP/1.1, (http/1.1)}{localhost:45091}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 89 (qtp350088835-89):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 90 (qtp350088835-90):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 91 (Session-HouseKeeper-edcf1f6-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 92 (nioEventLoopGroup-2-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@724f8cec):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1140
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 95 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 96 (IPC Server idle connection scanner for port 36971):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 59
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 98 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 115
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 101 (Command processor):
  State: WAITING
  Blocked count: 1
  Waited count: 350
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a43b7bc
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 102 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067):
  State: TIMED_WAITING
  Blocked count: 1292
  Waited count: 1478
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 103 (pool-20-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5d4816ad):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 97 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 94 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 104 (IPC Server handler 0 on default port 36971):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 571
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 105 (IPC Server handler 1 on default port 36971):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 571
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 106 (IPC Server handler 2 on default port 36971):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 579
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 107 (IPC Server handler 3 on default port 36971):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 575
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 108 (IPC Server handler 4 on default port 36971):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 571
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 120 (pool-26-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 115 (IPC Client (625843813) connection to localhost/127.0.0.1:44067 from jenkins):
  State: TIMED_WAITING
  Blocked count: 1287
  Waited count: 1288
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Thread 121 (qtp1533570740-121):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 122 (qtp1533570740-122-acceptor-0@515ec047-ServerConnector@2f8fc5e5{HTTP/1.1, (http/1.1)}{localhost:39919}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 116 (IPC Parameter Sending Thread for localhost/127.0.0.1:44067):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1953
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 123 (qtp1533570740-123):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 124 (qtp1533570740-124):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 125 (Session-HouseKeeper-3f2947df-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 126 (nioEventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@3563764c):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1140
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 129 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 130 (IPC Server idle connection scanner for port 37319):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 58
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 132 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 114
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 135 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 358
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36855e10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 136 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067):
  State: TIMED_WAITING
  Blocked count: 1354
  Waited count: 1473
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 137 (pool-29-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1a7c72f1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 131 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 128 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 138 (IPC Server handler 0 on default port 37319):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 571
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 139 (IPC Server handler 1 on default port 37319):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 573
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 140 (IPC Server handler 2 on default port 37319):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 592
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 141 (IPC Server handler 3 on default port 37319):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 591
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 142 (IPC Server handler 4 on default port 37319):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 603
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 152 (pool-36-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 153 (qtp1278567483-153):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fee2042d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 154 (qtp1278567483-154-acceptor-0@4022ab3e-ServerConnector@1ec9a36b{HTTP/1.1, (http/1.1)}{localhost:43805}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 155 (qtp1278567483-155):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 156 (qtp1278567483-156):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 157 (Session-HouseKeeper-51f35b56-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 158 (nioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@303700ae):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1139
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 161 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 162 (IPC Server idle connection scanner for port 46183):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 58
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 164 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 114
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 167 (Command processor):
  State: WAITING
  Blocked count: 0
  Waited count: 332
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@348462c3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 168 (BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067):
  State: TIMED_WAITING
  Blocked count: 1269
  Waited count: 1445
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 169 (pool-38-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@213622aa):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 163 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 160 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 170 (IPC Server handler 0 on default port 46183):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 583
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 171 (IPC Server handler 1 on default port 46183):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 576
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 172 (IPC Server handler 2 on default port 46183):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 584
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 173 (IPC Server handler 3 on default port 46183):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 574
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 174 (IPC Server handler 4 on default port 46183):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 570
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4)): State: TIMED_WAITING Blocked count: 7 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (java.util.concurrent.ThreadPoolExecutor$Worker@38dc0a4e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (java.util.concurrent.ThreadPoolExecutor$Worker@1e899c91[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5/current/BP-829356940-172.17.0.2-1732162930732): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (java.util.concurrent.ThreadPoolExecutor$Worker@a38f3a0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60527): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 239 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 243 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 285 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 244 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 327 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@efded71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 245 (ProcessThread(sid:0 cport:60527):): State: WAITING Blocked count: 0 Waited count: 419 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6967c2f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 246 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 452 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a5f457e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 247 (NIOWorkerThread-1): State: WAITING Blocked count: 6 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7ccd7a57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 16 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (Time-limited test-SendThread(127.0.0.1:60527)): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 262 (Time-limited test-EventThread): State: WAITING Blocked count: 22 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b3ea3ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 263 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (zk-event-processor-pool-0): State: WAITING Blocked count: 24 Waited count: 64 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@421a6557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-7): State: WAITING Blocked count: 6 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 83 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-16): State: WAITING Blocked count: 6 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dba8af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 280 
(RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5592a081 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39577): State: WAITING Blocked count: 117 Waited count: 417 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d3a4ddc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39577): State: WAITING Blocked count: 99 Waited count: 440 Waiting on java.util.concurrent.Semaphore$NonfairSync@53e5aa46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39577): State: WAITING Blocked count: 32 Waited count: 6910 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42c44db0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 
(RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39577): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2927bc1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2927bc1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7db992e2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@370a089 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@63b9d49a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39577): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@72e4f75f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 292 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43fb4563 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 293 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 315 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 337 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 82 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (M:0;fdc0a393186c:39577): State: TIMED_WAITING Blocked count: 12 Waited count: 2672 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1356/0x00007fee212011d0.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) 
app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 364 (master/fdc0a393186c:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 366 (org.apache.hadoop.hdfs.PeerCache@484ad824): State: TIMED_WAITING Blocked count: 0 Waited count: 189 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 385 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5650 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 402 
(MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 89 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 143 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24fb068d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56474 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 437 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 438 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 461 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@96f57ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 483 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@209430a6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 487 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ca79557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 (regionserver/fdc0a393186c:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6625af1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56296 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 540 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 593 (region-location-1): State: WAITING Blocked count: 6 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 993 
(MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 409 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1056 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1087 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3fefdbd7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1146 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1471 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@dee3101 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2783 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2784 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1504e6e5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4493 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4494 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4495 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8077 (AsyncFSWAL-1-hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData-prefix:fdc0a393186c,39577,1732162937127): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ee6f6b2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8080 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8081 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8084 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8085 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1340/0x00007fee211f9110.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-21T04:31:46,353 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T04:31:48,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/WALs/fdc0a393186c,39577,1732162937127/fdc0a393186c%2C39577%2C1732162937127.1732162938960 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T04:31:49,459 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-11-21T04:31:49,460 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T04:31:49,460 INFO [M:0;fdc0a393186c:39577 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T04:31:49,460 INFO [M:0;fdc0a393186c:39577 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39577 2024-11-21T04:31:49,461 INFO [M:0;fdc0a393186c:39577 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T04:31:49,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44067/user/jenkins/test-data/b57f4519-60a7-ea26-a435-c60e3064a18a/MasterData/WALs/fdc0a393186c,39577,1732162937127/fdc0a393186c%2C39577%2C1732162937127.1732162938960 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-11-21T04:31:49,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T04:31:49,625 INFO [M:0;fdc0a393186c:39577 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T04:31:49,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39577-0x1015cea71210000, quorum=127.0.0.1:60527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T04:31:49,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@606c6d1d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T04:31:49,678 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ec9a36b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T04:31:49,678 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T04:31:49,678 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f667f57{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T04:31:49,679 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@451d3a5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,STOPPED} 2024-11-21T04:31:49,681 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T04:31:49,681 WARN [BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T04:31:49,681 WARN [BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-829356940-172.17.0.2-1732162930732 (Datanode Uuid 18c6b082-ea3f-4d32-a3a1-24c94ee7a26d) service to localhost/127.0.0.1:44067 2024-11-21T04:31:49,681 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T04:31:49,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data5/current/BP-829356940-172.17.0.2-1732162930732 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T04:31:49,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data6/current/BP-829356940-172.17.0.2-1732162930732 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T04:31:49,684 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T04:31:49,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1cca72f0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T04:31:49,686 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2f8fc5e5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T04:31:49,686 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T04:31:49,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f60738e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T04:31:49,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76c81bc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,STOPPED} 2024-11-21T04:31:49,687 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T04:31:49,688 WARN [BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T04:31:49,688 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T04:31:49,688 WARN [BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-829356940-172.17.0.2-1732162930732 (Datanode Uuid a1cb1ef9-fba7-4559-8d3c-2e34a1f1e090) service to localhost/127.0.0.1:44067 2024-11-21T04:31:49,688 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data3/current/BP-829356940-172.17.0.2-1732162930732 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T04:31:49,689 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data4/current/BP-829356940-172.17.0.2-1732162930732 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T04:31:49,689 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T04:31:49,691 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@746b5398{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T04:31:49,691 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@26b3760d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T04:31:49,691 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T04:31:49,691 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18d9bb98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T04:31:49,691 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11dede43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,STOPPED} 2024-11-21T04:31:49,692 WARN [BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T04:31:49,692 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T04:31:49,692 WARN [BP-829356940-172.17.0.2-1732162930732 heartbeating to localhost/127.0.0.1:44067 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-829356940-172.17.0.2-1732162930732 (Datanode Uuid 7aba5fb8-7d9b-4bdf-a725-ba58273c4411) service to localhost/127.0.0.1:44067 2024-11-21T04:31:49,692 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T04:31:49,693 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data1/current/BP-829356940-172.17.0.2-1732162930732 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T04:31:49,693 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/cluster_735df1a6-2ad5-0484-cf6a-f261e6ad7424/data/data2/current/BP-829356940-172.17.0.2-1732162930732 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T04:31:49,693 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T04:31:49,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a55f3e1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T04:31:49,701 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T04:31:49,701 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T04:31:49,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d167fe8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T04:31:49,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74e30e0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/34cf93cb-bfb5-e10e-39bb-f0067d175ece/hadoop.log.dir/,STOPPED} 2024-11-21T04:31:49,713 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T04:31:49,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down